MindsDB 25.3.3.0-py3-none-any.whl → 25.3.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of MindsDB might be problematic.

Files changed (45)
  1. mindsdb/__about__.py +2 -2
  2. mindsdb/api/executor/datahub/datanodes/information_schema_datanode.py +2 -6
  3. mindsdb/api/executor/datahub/datanodes/mindsdb_tables.py +1 -1
  4. mindsdb/api/http/namespaces/agents.py +9 -5
  5. mindsdb/api/http/namespaces/chatbots.py +6 -5
  6. mindsdb/api/http/namespaces/databases.py +5 -6
  7. mindsdb/api/http/namespaces/skills.py +5 -4
  8. mindsdb/api/http/namespaces/views.py +6 -7
  9. mindsdb/integrations/handlers/chromadb_handler/chromadb_handler.py +23 -2
  10. mindsdb/integrations/handlers/confluence_handler/confluence_api_client.py +164 -0
  11. mindsdb/integrations/handlers/confluence_handler/confluence_handler.py +54 -59
  12. mindsdb/integrations/handlers/confluence_handler/confluence_tables.py +753 -0
  13. mindsdb/integrations/handlers/confluence_handler/connection_args.py +8 -8
  14. mindsdb/integrations/handlers/dummy_data_handler/dummy_data_handler.py +16 -6
  15. mindsdb/integrations/handlers/file_handler/tests/test_file_handler.py +64 -83
  16. mindsdb/integrations/handlers/huggingface_handler/requirements.txt +5 -4
  17. mindsdb/integrations/handlers/huggingface_handler/requirements_cpu.txt +5 -5
  18. mindsdb/integrations/handlers/langchain_handler/requirements.txt +1 -1
  19. mindsdb/integrations/handlers/lightwood_handler/requirements.txt +3 -3
  20. mindsdb/integrations/handlers/litellm_handler/requirements.txt +1 -1
  21. mindsdb/integrations/handlers/llama_index_handler/requirements.txt +1 -1
  22. mindsdb/integrations/handlers/ms_one_drive_handler/ms_graph_api_one_drive_client.py +1 -1
  23. mindsdb/integrations/handlers/ms_teams_handler/ms_graph_api_teams_client.py +278 -0
  24. mindsdb/integrations/handlers/ms_teams_handler/ms_teams_handler.py +114 -70
  25. mindsdb/integrations/handlers/ms_teams_handler/ms_teams_tables.py +431 -0
  26. mindsdb/integrations/handlers/mysql_handler/mysql_handler.py +2 -0
  27. mindsdb/integrations/handlers/pgvector_handler/pgvector_handler.py +18 -4
  28. mindsdb/integrations/handlers/ray_serve_handler/ray_serve_handler.py +18 -16
  29. mindsdb/integrations/handlers/snowflake_handler/snowflake_handler.py +26 -1
  30. mindsdb/integrations/libs/vectordatabase_handler.py +2 -2
  31. mindsdb/integrations/utilities/files/file_reader.py +3 -3
  32. mindsdb/integrations/utilities/handlers/api_utilities/microsoft/ms_graph_api_utilities.py +36 -2
  33. mindsdb/integrations/utilities/rag/settings.py +1 -0
  34. mindsdb/interfaces/chatbot/chatbot_controller.py +6 -4
  35. mindsdb/interfaces/jobs/jobs_controller.py +1 -4
  36. mindsdb/interfaces/knowledge_base/controller.py +9 -28
  37. mindsdb/interfaces/knowledge_base/preprocessing/document_preprocessor.py +1 -1
  38. mindsdb/interfaces/skills/skills_controller.py +8 -7
  39. {mindsdb-25.3.3.0.dist-info → mindsdb-25.3.4.1.dist-info}/METADATA +237 -237
  40. {mindsdb-25.3.3.0.dist-info → mindsdb-25.3.4.1.dist-info}/RECORD +43 -41
  41. {mindsdb-25.3.3.0.dist-info → mindsdb-25.3.4.1.dist-info}/WHEEL +1 -1
  42. mindsdb/integrations/handlers/confluence_handler/confluence_table.py +0 -193
  43. mindsdb/integrations/handlers/confluence_handler/requirements.txt +0 -1
  44. {mindsdb-25.3.3.0.dist-info → mindsdb-25.3.4.1.dist-info/licenses}/LICENSE +0 -0
  45. {mindsdb-25.3.3.0.dist-info → mindsdb-25.3.4.1.dist-info}/top_level.txt +0 -0

mindsdb/integrations/handlers/confluence_handler/connection_args.py
@@ -4,29 +4,29 @@ from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_T
 
 
 connection_args = OrderedDict(
-    url={
+    api_base={
         "type": ARG_TYPE.URL,
-        "description": "Confluence URL",
-        "label": "url",
+        "description": "The base URL of the Confluence instance/server.",
+        "label": "Base URL",
         "required": True
     },
     username={
         "type": ARG_TYPE.STR,
-        "description": "Confluence username",
-        "label": "username",
+        "description": "The username for the Confluence account.",
+        "label": "Username",
         "required": True
     },
     password={
         "type": ARG_TYPE.STR,
-        "description": "Password",
-        "label": "password",
+        "description": "The API token for the Confluence account.",
+        "label": "Password",
         "required": True,
         "secret": True
     }
 )
 
 connection_args_example = OrderedDict(
-    url="https://marios.atlassian.net/",
+    api_base="https://marios.atlassian.net/",
     username="your_username",
     password="access_token"
 )
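
With this rename, Confluence connections pass `api_base` instead of `url`, and the `password` field now carries an Atlassian API token. A minimal sketch of the new argument shape (key names come from the diff above; values are placeholders, not real credentials):

# Hypothetical connection parameters for the updated Confluence handler.
confluence_connection = {
    "api_base": "https://your-site.atlassian.net/",  # base URL of the Confluence instance
    "username": "your_username",
    "password": "your_api_token",  # declared with "secret": True in connection_args
}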

mindsdb/integrations/handlers/dummy_data_handler/dummy_data_handler.py
@@ -1,4 +1,5 @@
 import time
+from typing import Optional, List
 
 import duckdb
 from typing import Any
@@ -36,18 +37,27 @@ class DummyHandler(DatabaseHandler):
         """
         return HandlerStatusResponse(success=True)
 
-    def native_query(self, query: Any) -> HandlerResponse:
+    def native_query(self, query: Any, params: Optional[List] = None) -> HandlerResponse:
        """Receive raw query and act upon it somehow
 
         Args:
-            query (Any): query in native format (str for sql databases,
-                dict for mongo, etc)
+            query (Any): query in native format (str for sql databases, dict for mongo, etc)
+            params (Optional[List])
 
         Returns:
             HandlerResponse
         """
         con = duckdb.connect(self.db_path)
-        result_df = con.execute(query).fetchdf()
+        if params is not None:
+            query = query.replace('%s', '?')
+            cur = con.executemany(query, params)
+            if cur.rowcount >= 0:
+                result_df = cur.fetchdf()
+            else:
+                con.close()
+                return HandlerResponse(RESPONSE_TYPE.OK)
+        else:
+            result_df = con.execute(query).fetchdf()
         con.close()
         return HandlerResponse(RESPONSE_TYPE.TABLE, result_df)
 
@@ -62,8 +72,8 @@ class DummyHandler(DatabaseHandler):
             HandlerResponse
         """
         renderer = SqlalchemyRender('postgres')
-        query_str = renderer.get_string(query, with_failback=True)
-        return self.native_query(query_str)
+        query_str, params = renderer.get_exec_params(query, with_failback=True)
+        return self.native_query(query_str, params)
 
     def get_tables(self) -> HandlerResponse:
         """Get a list of all the tables in the database

mindsdb/integrations/handlers/file_handler/tests/test_file_handler.py
@@ -8,7 +8,6 @@ import pandas
 import pytest
 from mindsdb_sql_parser.exceptions import ParsingException
 from mindsdb_sql_parser.ast import CreateTable, DropTables, Identifier, Insert, TableColumn, Update
-from pytest_lazyfixture import lazy_fixture
 
 from mindsdb.integrations.handlers.file_handler.file_handler import FileHandler
 from mindsdb.integrations.libs.response import RESPONSE_TYPE
@@ -75,33 +74,26 @@ def curr_dir():
     return os.path.dirname(os.path.realpath(__file__))
 
 
-# Fixtures to get a path to a partiular type of file
-@pytest.fixture
 def csv_file() -> str:
     return os.path.join(curr_dir(), "data", "test.csv")
 
 
-@pytest.fixture
 def xlsx_file() -> str:
     return os.path.join(curr_dir(), "data", "test.xlsx")
 
 
-@pytest.fixture
 def json_file() -> str:
     return os.path.join(curr_dir(), "data", "test.json")
 
 
-@pytest.fixture
 def parquet_file() -> str:
     return os.path.join(curr_dir(), "data", "test.parquet")
 
 
-@pytest.fixture
 def pdf_file() -> str:
     return os.path.join(curr_dir(), "data", "test.pdf")
 
 
-@pytest.fixture
 def txt_file() -> str:
     return os.path.join(curr_dir(), "data", "test.txt")
 
@@ -109,56 +101,47 @@ def txt_file() -> str:
 class TestIsItX:
     """Tests all of the 'is_it_x()' functions to determine a file's type"""
 
-    # We can't test xlsx or parquet here because they're binary files
-    @pytest.mark.parametrize(
-        "file_path,result",
-        [(lazy_fixture("csv_file"), True), (lazy_fixture("json_file"), False)],
-    )
-    def test_is_it_csv(self, file_path, result):
-        with open(file_path, "r") as fh:
-            assert FileReader.is_csv(StringIO(fh.read())) is result
-
-    @pytest.mark.parametrize(
-        "file_path,result",
-        [
-            (lazy_fixture("csv_file"), 'csv'),
-            (lazy_fixture("xlsx_file"), 'xlsx'),
-            (lazy_fixture("json_file"), 'json'),
-            (lazy_fixture("parquet_file"), 'parquet'),
-            (lazy_fixture("txt_file"), 'txt'),
-            (lazy_fixture("pdf_file"), 'pdf'),
-        ],
-    )
-    def test_format(self, file_path, result):
-        assert FileReader(path=file_path).get_format() == result
-
-    # We can't test xlsx or parquet here because they're binary files
-    @pytest.mark.parametrize(
-        "file_path,result",
-        [
-            (lazy_fixture("csv_file"), False),
-            (lazy_fixture("json_file"), True),
-            (lazy_fixture("txt_file"), False),
-        ],
-    )
-    def test_is_it_json(self, file_path, result):
-        with open(file_path, "r") as fh:
-            assert FileReader.is_json(StringIO(fh.read())) is result
-
-    @pytest.mark.parametrize(
-        "file_path,result",
-        [
-            (lazy_fixture("csv_file"), False),
-            (lazy_fixture("xlsx_file"), False),
-            (lazy_fixture("json_file"), False),
-            (lazy_fixture("parquet_file"), True),
-            (lazy_fixture("txt_file"), False),
-            (lazy_fixture("pdf_file"), False),
-        ],
-    )
-    def test_is_it_parquet(self, file_path, result):
-        with open(file_path, "rb") as fh:
-            assert FileReader.is_parquet(BytesIO(fh.read())) is result
+    def test_is_it_csv(self):
+        # We can't test xlsx or parquet here because they're binary files
+        for file_path, result in (
+            (csv_file(), True),
+            (json_file(), False)
+        ):
+            with open(file_path, "r") as fh:
+                assert FileReader.is_csv(StringIO(fh.read())) is result
+
+    def test_format(self):
+        for file_path, result in (
+            (csv_file(), 'csv'),
+            (xlsx_file(), 'xlsx'),
+            (json_file(), 'json'),
+            (parquet_file(), 'parquet'),
+            (txt_file(), 'txt'),
+            (pdf_file(), 'pdf'),
+        ):
+            assert FileReader(path=file_path).get_format() == result
+
+    def test_is_it_json(self):
+        # We can't test xlsx or parquet here because they're binary files
+        for file_path, result in (
+            (csv_file(), False),
+            (json_file(), True),
+            (txt_file(), False),
+        ):
+            with open(file_path, "r") as fh:
+                assert FileReader.is_json(StringIO(fh.read())) is result
+
+    def test_is_it_parquet(self):
+        for file_path, result in (
+            (csv_file(), False),
+            (xlsx_file(), False),
+            (json_file(), False),
+            (parquet_file(), True),
+            (txt_file(), False),
+            (pdf_file(), False),
+        ):
+            with open(file_path, "rb") as fh:
+                assert FileReader.is_parquet(BytesIO(fh.read())) is result
 
 
 class TestQuery:
@@ -188,13 +171,14 @@ class TestQuery:
 
         assert response.type == RESPONSE_TYPE.ERROR
 
-    def test_query_insert(self, csv_file, monkeypatch):
+    def test_query_insert(self, monkeypatch):
         """Test an invalid insert query"""
         # Create a temporary file to save the csv file to.
+        csv_file_path = csv_file()
         csv_tmp = os.path.join(tempfile.gettempdir(), "test.csv")
         if os.path.exists(csv_tmp):
             os.remove(csv_tmp)
-        shutil.copy(csv_file, csv_tmp)
+        shutil.copy(csv_file_path, csv_tmp)
 
         def mock_get_file_path(self, name):
             return csv_tmp
@@ -270,18 +254,7 @@
         file_handler.native_query("INVALID QUERY")
 
 
-@pytest.mark.parametrize(
-    "file_path,expected_columns",
-    [
-        (lazy_fixture("csv_file"), test_file_content[0]),
-        (lazy_fixture("xlsx_file"), test_file_content[0]),
-        (lazy_fixture("json_file"), test_file_content[0]),
-        (lazy_fixture("parquet_file"), test_file_content[0]),
-        (lazy_fixture("pdf_file"), ["content", "metadata"]),
-        (lazy_fixture("txt_file"), ["content", "metadata"]),
-    ],
-)
-def test_handle_source(file_path, expected_columns):
+def test_handle_source():
 
     def get_reader(file_path):
         # using path
@@ -300,17 +273,25 @@ def test_handle_source(file_path, expected_columns):
         reader = FileReader(file=fd, name=Path(file_path).name)
         yield reader
 
-    # using different methods to create reader
-    for reader in get_reader(file_path):
-        df = reader.get_page_content()
-        assert isinstance(df, pandas.DataFrame)
-
-        assert df.columns.tolist() == expected_columns
-
-        # The pdf and txt files have some different content
-        if reader.get_format() not in ("pdf", "txt"):
-            assert len(df) == len(test_file_content) - 1
-            assert df.values.tolist() == test_file_content[1:]
+    for file_path, expected_columns in (
+        (csv_file(), test_file_content[0]),
+        (xlsx_file(), test_file_content[0]),
+        (json_file(), test_file_content[0]),
+        (parquet_file(), test_file_content[0]),
+        (pdf_file(), ["content", "metadata"]),
+        (txt_file(), ["content", "metadata"]),
+    ):
+        # using different methods to create reader
+        for reader in get_reader(file_path):
+            df = reader.get_page_content()
+            assert isinstance(df, pandas.DataFrame)
+
+            assert df.columns.tolist() == expected_columns
+
+            # The pdf and txt files have some different content
+            if reader.get_format() not in ("pdf", "txt"):
+                assert len(df) == len(test_file_content) - 1
+                assert df.values.tolist() == test_file_content[1:]
 
 
 @pytest.mark.parametrize(
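
The test refactor drops the `pytest-lazyfixture` dependency by turning the path fixtures into plain helper functions and iterating over cases inside each test. If per-case reporting were still wanted, one alternative (a sketch only, not what this release does) is to parametrize over the helper callables themselves:

# Sketch: keep pytest.mark.parametrize, but pass the plain helper functions
# (csv_file, json_file) instead of lazy fixtures; FileReader is the class
# under test in the module above.
import pytest

@pytest.mark.parametrize(
    "get_path,expected",
    [(csv_file, "csv"), (json_file, "json")],
)
def test_format_per_case(get_path, expected):
    assert FileReader(path=get_path()).get_format() == expected

The loop form trades per-case test IDs for fewer dependencies: a single failing file now aborts the remaining cases in the same test.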

mindsdb/integrations/handlers/huggingface_handler/requirements.txt
@@ -1,5 +1,6 @@
+# NOTE: Any changes made here need to be made to requirements_cpu.txt as well
 datasets==2.16.1
-evaluate
-torch
-nltk>=3.9
-huggingface-hub
+evaluate==0.4.3
+nltk==3.9.1
+huggingface-hub==0.29.3
+torch==2.6.0

mindsdb/integrations/handlers/huggingface_handler/requirements_cpu.txt
@@ -1,6 +1,6 @@
-datasets==2.16.1
-evaluate
-nltk>=3.9
-huggingface-hub
 # Needs to be installed with `pip install --extra-index-url https://download.pytorch.org/whl/ .[huggingface_cpu]`
-torch==2.2.0+cpu
+datasets==2.16.1
+evaluate==0.4.3
+nltk==3.9.1
+huggingface-hub==0.29.3
+torch==2.6.0+cpu

mindsdb/integrations/handlers/langchain_handler/requirements.txt
@@ -1,7 +1,7 @@
 wikipedia==1.4.0
 tiktoken
 anthropic>=0.26.1
-litellm==1.44.8
+litellm==1.63.14
 chromadb~=0.6.3 # Knowledge bases.
 -r mindsdb/integrations/handlers/openai_handler/requirements.txt
 -r mindsdb/integrations/handlers/langchain_embedding_handler/requirements.txt

mindsdb/integrations/handlers/lightwood_handler/requirements.txt
@@ -1,4 +1,4 @@
-lightwood>=25.2.2.0
-lightwood[extra]>=25.2.2.0
-lightwood[xai]>=25.2.2.0
+lightwood>=25.3.3.3
+lightwood[extra]>=25.3.3.3
+lightwood[xai]>=25.3.3.3
 type_infer==0.0.20

mindsdb/integrations/handlers/litellm_handler/requirements.txt
@@ -1,2 +1,2 @@
-litellm==1.44.8
+litellm==1.63.14
 

mindsdb/integrations/handlers/llama_index_handler/requirements.txt
@@ -1,4 +1,4 @@
-llama-index==0.10.13
+llama-index==0.12.21
 pydantic-settings >= 2.1.0
 llama-index-readers-web
 llama-index-embeddings-openai

mindsdb/integrations/handlers/ms_one_drive_handler/ms_graph_api_one_drive_client.py
@@ -99,4 +99,4 @@ class MSGraphAPIOneDriveClient(MSGraphAPIBaseClient):
         Returns:
             bytes: The content of the specified item.
         """
-        return self.fetch_data(f"me/drive/root:/{path}:/content")
+        return self.fetch_data_content(f"me/drive/root:/{path}:/content")
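
Together with the new Teams client below, this rename suggests the base Graph client's old `fetch_data()` was split into a JSON variant and a raw-content variant (the actual change lives in `ms_graph_api_utilities.py`, which this diff does not show in full). A hypothetical sketch of that split; the constructor and method bodies are assumptions, not MindsDB's code:

# Illustrative only: assumed shape of the fetch_data_json/fetch_data_content split.
import requests

class GraphClientSketch:
    BASE_URL = "https://graph.microsoft.com/v1.0/"

    def __init__(self, access_token: str):
        self.access_token = access_token

    def _get(self, endpoint: str) -> requests.Response:
        response = requests.get(
            self.BASE_URL + endpoint,
            headers={"Authorization": f"Bearer {self.access_token}"},
        )
        response.raise_for_status()
        return response

    def fetch_data_json(self, endpoint: str):
        return self._get(endpoint).json()    # parsed metadata payloads

    def fetch_data_content(self, endpoint: str) -> bytes:
        return self._get(endpoint).content   # raw bytes, e.g. file downloads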

mindsdb/integrations/handlers/ms_teams_handler/ms_graph_api_teams_client.py
@@ -0,0 +1,278 @@
+from typing import Text, List, Dict, Optional
+
+from requests.exceptions import RequestException
+
+from mindsdb.integrations.utilities.handlers.api_utilities.microsoft.ms_graph_api_utilities import MSGraphAPIBaseClient
+from mindsdb.utilities import log
+
+logger = log.getLogger(__name__)
+
+
+class MSGraphAPITeamsDelegatedPermissionsClient(MSGraphAPIBaseClient):
+    """
+    The Microsoft Graph API client for the Microsoft Teams handler with delegated permissions.
+    This client is used for accessing the Microsoft Teams specific endpoints of the Microsoft Graph API.
+    Several common methods for submitting requests, fetching data, etc. are inherited from the base class.
+    """
+
+    def check_connection(self) -> bool:
+        """
+        Check if the connection to Microsoft Teams is established.
+
+        Returns:
+            bool: True if the connection is established, False otherwise.
+        """
+        try:
+            self.fetch_data_json("me/joinedTeams")
+            return True
+        except RequestException as request_error:
+            logger.error(f"Failed to check connection to Microsoft Teams: {request_error}")
+            return False
+
+    def get_all_groups(self) -> List[Dict]:
+        """
+        Get all groups that the signed in user is a member of.
+
+        Returns:
+            List[Dict]: The groups data.
+        """
+        return self.fetch_data_json("me/joinedTeams")
+
+    def _get_all_group_ids(self) -> List[Text]:
+        """
+        Get all group IDs related to Microsoft Teams.
+
+        Returns:
+            List[Text]: The group IDs.
+        """
+        if not self._group_ids:
+            groups = self.get_all_groups()
+            self._group_ids = [group["id"] for group in groups]
+
+        return self._group_ids
+
+    def get_channel_in_group_by_id(self, group_id: Text, channel_id: Text) -> Dict:
+        """
+        Get a channel by its ID and the ID of the group that it belongs to.
+
+        Args:
+            group_id (Text): The ID of the group that the channel belongs to.
+            channel_id (Text): The ID of the channel.
+
+        Returns:
+            Dict: The channel data.
+        """
+        channel = self.fetch_data_json(f"teams/{group_id}/channels/{channel_id}")
+        # Add the group ID to the channel data.
+        channel.update({"teamId": group_id})
+
+        return channel
+
+    def get_channels_in_group_by_ids(self, group_id: Text, channel_ids: List[Text]) -> List[Dict]:
+        """
+        Get channels by their IDs and the ID of the group that they belong to.
+
+        Args:
+            group_id (Text): The ID of the group that the channels belong to.
+            channel_ids (List[Text]): The IDs of the channels.
+
+        Returns:
+            List[Dict]: The channels data.
+        """
+        channels = []
+        for channel_id in channel_ids:
+            channels.append(self.get_channel_in_group_by_id(group_id, channel_id))
+
+        return channels
+
+    def get_all_channels_in_group(self, group_id: Text) -> List[Dict]:
+        """
+        Get all channels of a group by its ID.
+
+        Args:
+            group_id (Text): The ID of the group.
+
+        Returns:
+            List[Dict]: The channels data.
+        """
+        channels = self.fetch_data_json(f"teams/{group_id}/channels")
+        for channel in channels:
+            channel["teamId"] = group_id
+
+        return channels
+
+    def get_all_channels_across_all_groups(self) -> List[Dict]:
+        """
+        Get all channels across all groups that the signed in user is a member of.
+
+        Returns:
+            List[Dict]: The channels data.
+        """
+        channels = []
+        for group_id in self._get_all_group_ids():
+            channels += self.get_all_channels_in_group(group_id)
+
+        return channels
+
+    def get_channels_across_all_groups_by_ids(self, channel_ids: List[Text]) -> List[Dict]:
+        """
+        Get channels by their IDs.
+
+        Args:
+            channel_ids (List[Text]): The IDs of the channels.
+
+        Returns:
+            List[Dict]: The channels data.
+        """
+        channels = self.get_all_channels_across_all_groups()
+
+        return [channel for channel in channels if channel["id"] in channel_ids]
+
+    def get_message_in_channel_by_id(self, group_id: Text, channel_id: Text, message_id: Text) -> Dict:
+        """
+        Get a message by its ID, the ID of the group that it belongs to, and the ID of the channel that it belongs to.
+
+        Args:
+            group_id (Text): The ID of the group that the channel belongs to.
+            channel_id (Text): The ID of the channel that the message belongs to.
+            message_id (Text): The ID of the message.
+
+        Returns:
+            Dict: The message data.
+        """
+        return self.fetch_data_json(f"teams/{group_id}/channels/{channel_id}/messages/{message_id}")
+
+    def get_messages_in_channel_by_ids(self, group_id: Text, channel_id: Text, message_ids: List[Text]) -> List[Dict]:
+        """
+        Get messages by their IDs, the ID of the group that they belong to, and the ID of the channel that they belong to.
+
+        Args:
+            group_id (Text): The ID of the group that the channel belongs to.
+            channel_id (Text): The ID of the channel that the messages belong to.
+            message_ids (List[Text]): The IDs of the messages.
+
+        Returns:
+            List[Dict]: The messages data.
+        """
+        messages = []
+        for message_id in message_ids:
+            messages.append(self.get_message_in_channel_by_id(group_id, channel_id, message_id))
+
+        return messages
+
+    def get_all_messages_in_channel(self, group_id: Text, channel_id: Text, limit: int = None) -> List[Dict]:
+        """
+        Get messages of a channel by its ID and the ID of the group that it belongs to.
+
+        Args:
+            group_id (Text): The ID of the group that the channel belongs to.
+            channel_id (Text): The ID of the channel.
+
+        Returns:
+            List[Dict]: The messages data.
+        """
+        messages = []
+        for messages_batch in self.fetch_paginated_data(f"teams/{group_id}/channels/{channel_id}/messages"):
+            messages += messages_batch
+
+            if limit and len(messages) >= limit:
+                break
+
+        return messages[:limit]
+
+    def get_chat_by_id(self, chat_id: Text) -> Dict:
+        """
+        Get a chat by its ID.
+
+        Args:
+            chat_id (Text): The ID of the chat.
+
+        Returns:
+            Dict: The chat data.
+        """
+        return self.fetch_data_json(f"/me/chats/{chat_id}")
+
+    def get_chats_by_ids(self, chat_ids: List[Text]) -> List[Dict]:
+        """
+        Get chats by their IDs.
+
+        Args:
+            chat_ids (List[Text]): The IDs of the chats.
+
+        Returns:
+            List[Dict]: The chats data.
+        """
+        chats = []
+        for chat_id in chat_ids:
+            chats.append(self.get_chat_by_id(chat_id))
+
+        return chats
+
+    def get_all_chats(self, limit: int = None) -> List[Dict]:
+        """
+        Get all chats of the signed in user.
+
+        Args:
+            limit (int): The maximum number of chats to return.
+
+        Returns:
+            List[Dict]: The chats data.
+        """
+        chats = []
+        for chat_batch in self.fetch_paginated_data("me/chats"):
+            chats += chat_batch
+
+            if limit and len(chats) >= limit:
+                break
+
+        return chats[:limit]
+
+    def get_message_in_chat_by_id(self, chat_id: Text, message_id: Text) -> Dict:
+        """
+        Get a message by its ID and the ID of the chat that it belongs to.
+
+        Args:
+            chat_id (Text): The ID of the chat that the message belongs to.
+            message_id (Text): The ID of the message.
+
+        Returns:
+            Dict: The message data.
+        """
+        return self.fetch_data_json(f"me/chats/{chat_id}/messages/{message_id}")
+
+    def get_messages_in_chat_by_ids(self, chat_id: Text, message_ids: List[Text]) -> List[Dict]:
+        """
+        Get messages by their IDs and the ID of the chat that they belong to.
+
+        Args:
+            chat_id (Text): The ID of the chat that the messages belong to.
+            message_ids (List[Text]): The IDs of the messages.
+
+        Returns:
+            List[Dict]: The messages data.
+        """
+        messages = []
+        for message_id in message_ids:
+            messages.append(self.get_message_in_chat_by_id(chat_id, message_id))
+
+        return messages
+
+    def get_all_messages_in_chat(self, chat_id: Text, limit: int = None) -> List[Dict]:
+        """
+        Get messages of a chat by its ID.
+
+        Args:
+            chat_id (Text): The ID of the chat.
+
+        Returns:
+            List[Dict]: The messages data.
+        """
+        messages = []
+        for messages_batch in self.fetch_paginated_data(f"me/chats/{chat_id}/messages"):
+            messages += messages_batch
+
+            if limit and len(messages) >= limit:
+                break
+
+        return messages[:limit]
+
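
A hypothetical usage sketch for the new client; the constructor argument is an assumption, since initialization is inherited from MSGraphAPIBaseClient, which this hunk does not show:

# Assumed construction; the base class defines the real signature.
client = MSGraphAPITeamsDelegatedPermissionsClient(access_token="<token>")

if client.check_connection():
    # Walk every channel the signed-in user can see and sample its messages.
    for channel in client.get_all_channels_across_all_groups():
        messages = client.get_all_messages_in_channel(
            channel["teamId"], channel["id"], limit=10
        )
        print(channel["displayName"], len(messages))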