ws-bom-robot-app 0.0.99__py3-none-any.whl → 0.0.101__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (26)
  1. ws_bom_robot_app/llm/agent_description.py +123 -123
  2. ws_bom_robot_app/llm/agent_handler.py +176 -176
  3. ws_bom_robot_app/llm/agent_lcel.py +111 -50
  4. ws_bom_robot_app/llm/defaut_prompt.py +15 -15
  5. ws_bom_robot_app/llm/feedbacks/feedback_manager.py +66 -66
  6. ws_bom_robot_app/llm/main.py +159 -158
  7. ws_bom_robot_app/llm/models/api.py +2 -1
  8. ws_bom_robot_app/llm/models/feedback.py +30 -30
  9. ws_bom_robot_app/llm/nebuly_handler.py +185 -185
  10. ws_bom_robot_app/llm/tools/tool_builder.py +68 -68
  11. ws_bom_robot_app/llm/tools/tool_manager.py +332 -332
  12. ws_bom_robot_app/llm/tools/utils.py +41 -41
  13. ws_bom_robot_app/llm/utils/agent.py +34 -34
  14. ws_bom_robot_app/llm/utils/cms.py +114 -114
  15. ws_bom_robot_app/llm/utils/download.py +183 -183
  16. ws_bom_robot_app/llm/utils/print.py +29 -29
  17. ws_bom_robot_app/llm/vector_store/generator.py +137 -137
  18. ws_bom_robot_app/llm/vector_store/integration/shopify.py +143 -143
  19. ws_bom_robot_app/llm/vector_store/integration/thron.py +236 -236
  20. ws_bom_robot_app/llm/vector_store/loader/base.py +7 -1
  21. ws_bom_robot_app/llm/vector_store/loader/docling.py +20 -12
  22. ws_bom_robot_app/llm/vector_store/loader/json_loader.py +25 -25
  23. {ws_bom_robot_app-0.0.99.dist-info → ws_bom_robot_app-0.0.101.dist-info}/METADATA +364 -364
  24. {ws_bom_robot_app-0.0.99.dist-info → ws_bom_robot_app-0.0.101.dist-info}/RECORD +26 -26
  25. {ws_bom_robot_app-0.0.99.dist-info → ws_bom_robot_app-0.0.101.dist-info}/WHEEL +0 -0
  26. {ws_bom_robot_app-0.0.99.dist-info → ws_bom_robot_app-0.0.101.dist-info}/top_level.txt +0 -0
ws_bom_robot_app/llm/vector_store/generator.py
@@ -1,137 +1,137 @@
(all 137 lines are removed and re-added unchanged)
import os, gc, shutil, logging, traceback
import asyncio, aiofiles, aiofiles.os
from fastapi import HTTPException
from fastapi.responses import StreamingResponse
from langchain_core.documents import Document
from ws_bom_robot_app.llm.vector_store.loader.base import Loader
from ws_bom_robot_app.llm.models.api import RulesRequest, KbRequest, VectorDbResponse
from ws_bom_robot_app.llm.vector_store.integration.manager import IntegrationManager
from ws_bom_robot_app.llm.vector_store.db.manager import VectorDbManager
from ws_bom_robot_app.config import config
from ws_bom_robot_app.llm.models.kb import load_endpoints
from ws_bom_robot_app.llm.utils.download import download_files

async def _cleanup_directory(directory_path: str):
    if os.path.exists(directory_path):
        await asyncio.to_thread(shutil.rmtree, directory_path)

#@timer
async def rules(rq: RulesRequest) -> VectorDbResponse:
    _config = rq.config()
    db_name = rq.out_name()
    store_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_store, db_name)
    try:
        await VectorDbManager.get_strategy(rq.vector_type).create(rq.embeddings(),[Document(page_content=rule, metadata={"source": "rules"}) for rule in rq.rules], store_path) #type: ignore
        db_file_path = shutil.make_archive(os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_out, db_name), "zip", store_path)
        return VectorDbResponse(file = os.path.basename(db_file_path), vector_type=rq.vector_type)
    except Exception as e:
        try:
            await _cleanup_directory(store_path)
        finally:
            return VectorDbResponse(success = False, error = str(e))
    finally:
        gc.collect()

#@atimer
async def kb(rq: KbRequest) -> VectorDbResponse:
    os.environ['MPLCONFIGDIR'] = './tmp/.matplotlib'
    _config = rq.config()
    db_name = rq.out_name()
    src_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_src)
    working_path = os.path.join(src_path, db_name)

    if all([not rq.files,not rq.endpoints,not rq.integrations]):
        return VectorDbResponse(success = False, error = "No files, endpoints or integrations provided")
    else:
        await aiofiles.os.makedirs(src_path, exist_ok=True)
        await aiofiles.os.makedirs(working_path, exist_ok=True)

    documents: list[Document] = []
    # Download/copy all files
    if rq.files:
        try:
            loaders = Loader(working_path)
            filter_file_extensions = loaders.managed_file_extensions()
            files_to_download = [file for file in rq.files if not os.path.exists(os.path.join(src_path, os.path.basename(file)))]
            if files_to_download:
                await download_files(
                    [f"{_config.robot_cms_host}/{_config.robot_cms_kb_folder}/{os.path.basename(file)}" for file in files_to_download if any([file.endswith(ext) for ext in filter_file_extensions])],
                    src_path, authorization=_config.robot_cms_auth)
            # copy files to working tmp folder
            for file in rq.files:
                async with aiofiles.open(os.path.join(src_path, os.path.basename(file)), 'rb') as src_file:
                    async with aiofiles.open(os.path.join(working_path, os.path.basename(file)), 'wb') as dest_file:
                        await dest_file.write(await src_file.read())
            #load files
            try:
                documents.extend(await loaders.load())
            except Exception as e:
                tb = traceback.format_exc()
                _error = f"File loader failure: {e} | {tb}"
                logging.warning(_error)
                return VectorDbResponse(success = False, error = _error)
        except Exception as e:
            await _cleanup_directory(working_path)
            return VectorDbResponse(success = False, error = f"Failed to download file {e}")

    if rq.endpoints:
        try:
            documents.extend(await load_endpoints(rq.endpoints, working_path))
        except Exception as e:
            await _cleanup_directory(working_path)
            tb = traceback.format_exc()
            _error = f"Endpoint failure: {e} | {tb}"
            logging.warning(_error)
            return VectorDbResponse(success = False, error = _error)

    if rq.integrations:
        tasks = []
        for integration in rq.integrations:
            tasks.append(
                IntegrationManager
                .get_strategy(integration.type.lower(), working_path, integration.__pydantic_extra__) #type: ignore
                .load()
            )
        try:
            integration_documents = await asyncio.gather(*tasks)
            for docs in integration_documents:
                documents.extend(docs)
        except Exception as e:
            await _cleanup_directory(working_path)
            tb = traceback.format_exc()
            _error = f"Integration failure: {e} | {tb}"
            logging.warning(_error)
            return VectorDbResponse(success=False, error=_error)

    #cleanup
    await _cleanup_directory(working_path)

    if documents and len(documents) > 0:
        try:
            store_path = os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_store, db_name)
            db_file_path = await aiofiles.os.wrap(shutil.make_archive)(
                os.path.join(_config.robot_data_folder, _config.robot_data_db_folder, _config.robot_data_db_folder_out, db_name),
                "zip",
                await VectorDbManager.get_strategy(rq.vector_type).create(rq.embeddings(), documents, store_path, return_folder_path=True)
            )
            return VectorDbResponse(file = os.path.basename(db_file_path), vector_type=rq.vector_type)
        except Exception as e:
            await _cleanup_directory(store_path)
            return VectorDbResponse(success = False, error = str(e))
        finally:
            del documents
            gc.collect()
    else:
        _error = "No documents found in the knowledgebase folder"
        logging.warning(_error)
        return VectorDbResponse(success = False, error = _error)

async def kb_stream_file(filename: str):
    file_path = os.path.join(config.robot_data_folder, config.robot_data_db_folder, config.robot_data_db_folder_out, filename)
    if not os.path.isfile(file_path):
        raise HTTPException(status_code=404, detail="File not found")
    def iter_file():
        with open(file_path, mode="rb") as file:
            while chunk := file.read(1024*8):
                yield chunk
    return StreamingResponse(iter_file(), media_type="application/octet-stream", headers={"Content-Disposition": f"attachment; filename={filename}"})
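For context on the archive step in kb(): aiofiles.os.wrap(shutil.make_archive) in effect runs the blocking make_archive call in a worker thread so the event loop stays responsive. A minimal standalone sketch of the same pattern using only the standard library (the directory and file names are illustrative, not taken from the package):

import asyncio, shutil, tempfile
from pathlib import Path

async def archive_store(out_base: str, store_dir: str) -> str:
    # shutil.make_archive(base_name, format, root_dir) is blocking;
    # run it in a thread, mirroring aiofiles.os.wrap(shutil.make_archive) above.
    return await asyncio.to_thread(shutil.make_archive, out_base, "zip", store_dir)

async def _demo() -> None:
    with tempfile.TemporaryDirectory() as tmp:
        store = Path(tmp) / "store"
        store.mkdir()
        (store / "example.txt").write_text("hello")
        zip_path = await archive_store(str(Path(tmp) / "db_out"), str(store))
        print(zip_path)  # ends with db_out.zip

if __name__ == "__main__":
    asyncio.run(_demo())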
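kb_stream_file builds a StreamingResponse but is not itself declared as a route in this file; a hedged sketch of how it could be exposed through a FastAPI router (the import assumes this file is ws_bom_robot_app/llm/vector_store/generator.py, and the route path and router name are illustrative assumptions):

from fastapi import APIRouter
from ws_bom_robot_app.llm.vector_store.generator import kb_stream_file  # assumed module path

router = APIRouter()

@router.get("/kb/file/{filename}")  # hypothetical route path
async def download_kb_archive(filename: str):
    # Delegates to kb_stream_file, which raises HTTP 404 if the archive is missing
    # and otherwise streams the zip in 8 KiB chunks.
    return await kb_stream_file(filename)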
ws_bom_robot_app/llm/vector_store/integration/shopify.py
@@ -1,143 +1,143 @@
(all 143 lines are removed and re-added unchanged)
import asyncio, logging, aiohttp
from ws_bom_robot_app.llm.vector_store.integration.base import IntegrationStrategy
from langchain_core.documents import Document
from ws_bom_robot_app.llm.vector_store.loader.base import Loader
from typing import List, Union, Optional
from pydantic import BaseModel, Field, AliasChoices, field_validator
import json
import os

class ShopifyParams(BaseModel):
    """
    ShopifyParams is a model that defines the parameters required for Shopify integration.

    Attributes:
        shop_name (str): The shop name for Shopify.
        access_token (str): The access token for Shopify.
        graphql_query (Union[str, dict]): The GraphQL query string or dict for Shopify.
    """
    shop_name: str = Field(validation_alias=AliasChoices("shopName","shop_name"))
    access_token: str = Field(validation_alias=AliasChoices("accessToken","access_token"))
    graphql_query: Union[str, dict] = Field(validation_alias=AliasChoices("graphqlQuery","graphql_query"))

    @field_validator('graphql_query')
    @classmethod
    def extract_query_string(cls, v):
        """Extract the query string from dict format if needed"""
        if isinstance(v, dict) and 'query' in v:
            return v['query']
        return v

class Shopify(IntegrationStrategy):
    def __init__(self, knowledgebase_path: str, data: dict[str, Union[str,int,list]]):
        super().__init__(knowledgebase_path, data)
        self.__data = ShopifyParams.model_validate(self.data)

    def working_subdirectory(self) -> str:
        return 'shopify'

    async def run(self) -> None:
        _data = await self.__get_data()
        json_file_path = os.path.join(self.working_directory, 'shopify_data.json')
        with open(json_file_path, 'w', encoding='utf-8') as f:
            json.dump(_data, f, ensure_ascii=False)

    async def load(self) -> list[Document]:
        await self.run()
        await asyncio.sleep(1)
        return await Loader(self.working_directory).load()

    async def __get_data(self, page_size: int = 50) -> List[dict]:
        # API URL
        url = f"https://{self.__data.shop_name}.myshopify.com/admin/api/2024-07/graphql.json"

        # Headers
        headers = {
            "X-Shopify-Access-Token": self.__data.access_token,
            "Content-Type": "application/json"
        }

        all_data: List[dict] = []
        has_next_page = True
        cursor = None
        retry_count = 0
        max_retries = 5

        while has_next_page:
            # Query variables
            variables = {
                "first": page_size
            }

            if cursor:
                variables["after"] = cursor

            # Request payload
            payload = {
                "query": self.__data.graphql_query,
                "variables": variables
            }

            try:
                # Send the request
                async with aiohttp.ClientSession() as session:
                    async with session.post(url, headers=headers, json=payload) as response:
                        # Check whether the response is JSON
                        try:
                            data = await response.json()
                        except aiohttp.ContentTypeError:
                            text = await response.text()
                            logging.error(f"Non-JSON response received. Status code: {response.status}")
                            logging.error(f"Content: {text}")
                            raise Exception("Invalid response from API")

                        # Throttling handling
                        if "errors" in data:
                            error = data["errors"][0]
                            if error.get("extensions", {}).get("code") == "THROTTLED":
                                retry_count += 1
                                if retry_count > max_retries:
                                    raise Exception("Too many throttling attempts. Stopping execution.")

                                # Wait a little longer on each attempt
                                wait_time = 2 ** retry_count  # Exponential backoff
                                print(f"Rate limit reached. Waiting {wait_time} seconds... (Attempt {retry_count}/{max_retries})")
                                await asyncio.sleep(wait_time)
                                continue
                            else:
                                raise Exception(f"GraphQL errors: {data['errors']}")

                        # Reset the retry counter after a successful request
                        retry_count = 0

                        # Extract the data
                        _data = list(data["data"].values())[0]
                        edges = _data["edges"]
                        page_info = _data["pageInfo"]

                        # Append the nodes to the list
                        for edge in edges:
                            all_data.append(edge["node"])

                        # Update the cursor and the pagination flag
                        has_next_page = page_info["hasNextPage"]
                        cursor = page_info["endCursor"]

                        print(f"Retrieved {len(edges)} products. Total: {len(all_data)}")

                        # Short pause to avoid saturating the API
                        await asyncio.sleep(0.1)

            except aiohttp.ClientError as e:
                logging.error(f"Connection error: {e}")
                retry_count += 1
                if retry_count <= max_retries:
                    wait_time = 2 ** retry_count
                    logging.warning(f"Retrying in {wait_time} seconds...")
                    await asyncio.sleep(wait_time)
                    continue
                else:
                    raise Exception("Too many network errors. Stopping execution.")

        logging.info(f"Data retrieval completed! Total data: {len(all_data)}")
        return all_data
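The pagination loop in __get_data assumes graphql_query accepts $first and $after variables and returns a single connection whose edges and pageInfo (hasNextPage, endCursor) drive the cursor. An illustrative query value with that shape, here for products (the selected fields are an example, not taken from this package):

# Hypothetical query satisfying __get_data's expectations: one paginated connection,
# edges/node for the payload, pageInfo for the cursor bookkeeping.
EXAMPLE_PRODUCTS_QUERY = """
query Products($first: Int!, $after: String) {
  products(first: $first, after: $after) {
    edges { node { id title handle } }
    pageInfo { hasNextPage endCursor }
  }
}
"""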
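A minimal usage sketch of this integration, assuming the camelCase keys accepted by ShopifyParams and a working directory managed by IntegrationStrategy (all values below are placeholders):

import asyncio
from ws_bom_robot_app.llm.vector_store.integration.shopify import Shopify

async def _example() -> None:
    integration = Shopify(
        "/tmp/kb",  # placeholder knowledgebase_path
        {
            "shopName": "my-shop",       # placeholder shop
            "accessToken": "shpat_xxx",  # placeholder Admin API token
            "graphqlQuery": "query Products($first: Int!, $after: String) { products(first: $first, after: $after) { edges { node { id title } } pageInfo { hasNextPage endCursor } } }",
        },
    )
    # run() fetches all pages and writes shopify_data.json; load() then parses it into Documents.
    documents = await integration.load()
    print(len(documents))

# asyncio.run(_example())  # requires valid Shopify credentials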