algolia 3.8.2 → 3.10.2

Sign up to get free protection for your applications and to get access to all the features.
@@ -95,7 +95,7 @@ module Algolia
95
95
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::AddApiKeyResponse")
96
96
  end
97
97
 
98
- # If a record with the specified object ID exists, the existing record is replaced. Otherwise, a new record is added to the index. To update _some_ attributes of an existing record, use the [`partial` operation](#tag/Records/operation/partialUpdateObject) instead. To add, update, or replace multiple records, use the [`batch` operation](#tag/Records/operation/batch).
98
+ # If a record with the specified object ID exists, the existing record is replaced. Otherwise, a new record is added to the index. If you want to use auto-generated object IDs, use the [`saveObject` operation](#tag/Records/operation/saveObject). To update _some_ attributes of an existing record, use the [`partial` operation](#tag/Records/operation/partialUpdateObject) instead. To add, update, or replace multiple records, use the [`batch` operation](#tag/Records/operation/batch).
99
99
  #
100
100
  # Required API Key ACLs:
101
101
  # - addObject
@@ -140,7 +140,7 @@ module Algolia
140
140
  @api_client.call_api(:PUT, path, new_options)
141
141
  end
142
142
 
143
- # If a record with the specified object ID exists, the existing record is replaced. Otherwise, a new record is added to the index. To update _some_ attributes of an existing record, use the [`partial` operation](#tag/Records/operation/partialUpdateObject) instead. To add, update, or replace multiple records, use the [`batch` operation](#tag/Records/operation/batch).
143
+ # If a record with the specified object ID exists, the existing record is replaced. Otherwise, a new record is added to the index. If you want to use auto-generated object IDs, use the [`saveObject` operation](#tag/Records/operation/saveObject). To update _some_ attributes of an existing record, use the [`partial` operation](#tag/Records/operation/partialUpdateObject) instead. To add, update, or replace multiple records, use the [`batch` operation](#tag/Records/operation/batch).
144
144
  #
145
145
  # Required API Key ACLs:
146
146
  # - addObject
@@ -252,7 +252,7 @@ module Algolia
252
252
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::CreatedAtResponse")
253
253
  end
254
254
 
255
- # Adds, updates, or deletes records in one index with a single API request. Batching index updates reduces latency and increases data integrity. - Actions are applied in the order they're specified. - Actions are equivalent to the individual API requests of the same name.
255
+ # Adds, updates, or deletes records in one index with a single API request. Batching index updates reduces latency and increases data integrity. - Actions are applied in the order they're specified. - Actions are equivalent to the individual API requests of the same name. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
256
256
 
257
257
  # @param index_name [String] Name of the index on which to perform the operation. (required)
258
258
  # @param batch_write_params [BatchWriteParams] (required)
@@ -287,7 +287,7 @@ module Algolia
287
287
  @api_client.call_api(:POST, path, new_options)
288
288
  end
289
289
 
290
- # Adds, updates, or deletes records in one index with a single API request. Batching index updates reduces latency and increases data integrity. - Actions are applied in the order they're specified. - Actions are equivalent to the individual API requests of the same name.
290
+ # Adds, updates, or deletes records in one index with a single API request. Batching index updates reduces latency and increases data integrity. - Actions are applied in the order they're specified. - Actions are equivalent to the individual API requests of the same name. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
291
291
 
292
292
  # @param index_name [String] Name of the index on which to perform the operation. (required)
293
293
  # @param batch_write_params [BatchWriteParams] (required)
@@ -458,7 +458,7 @@ module Algolia
458
458
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::BrowseResponse")
459
459
  end
460
460
 
461
- # Deletes only the records from an index while keeping settings, synonyms, and rules.
461
+ # Deletes only the records from an index while keeping settings, synonyms, and rules. This operation is resource-intensive and subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
462
462
  #
463
463
  # Required API Key ACLs:
464
464
  # - deleteIndex
@@ -490,7 +490,7 @@ module Algolia
490
490
  @api_client.call_api(:POST, path, new_options)
491
491
  end
492
492
 
493
- # Deletes only the records from an index while keeping settings, synonyms, and rules.
493
+ # Deletes only the records from an index while keeping settings, synonyms, and rules. This operation is resource-intensive and subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
494
494
  #
495
495
  # Required API Key ACLs:
496
496
  # - deleteIndex
@@ -816,7 +816,7 @@ module Algolia
816
816
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::DeleteApiKeyResponse")
817
817
  end
818
818
 
819
- # This operation doesn't accept empty queries or filters. It's more efficient to get a list of object IDs with the [`browse` operation](#tag/Search/operation/browse), and then delete the records using the [`batch` operation](#tag/Records/operation/batch).
819
+ # This operation doesn't accept empty filters. This operation is resource-intensive. You should only use it if you can't get the object IDs of the records you want to delete. It's more efficient to get a list of object IDs with the [`browse` operation](#tag/Search/operation/browse), and then delete the records using the [`batch` operation](#tag/Records/operation/batch). This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
820
820
  #
821
821
  # Required API Key ACLs:
822
822
  # - deleteIndex
@@ -853,7 +853,7 @@ module Algolia
853
853
  @api_client.call_api(:POST, path, new_options)
854
854
  end
855
855
 
856
- # This operation doesn't accept empty queries or filters. It's more efficient to get a list of object IDs with the [`browse` operation](#tag/Search/operation/browse), and then delete the records using the [`batch` operation](#tag/Records/operation/batch).
856
+ # This operation doesn't accept empty filters. This operation is resource-intensive. You should only use it if you can't get the object IDs of the records you want to delete. It's more efficient to get a list of object IDs with the [`browse` operation](#tag/Search/operation/browse), and then delete the records using the [`batch` operation](#tag/Records/operation/batch). This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
857
857
  #
858
858
  # Required API Key ACLs:
859
859
  # - deleteIndex
@@ -1951,7 +1951,7 @@ module Algolia
1951
1951
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::ListUserIdsResponse")
1952
1952
  end
1953
1953
 
1954
- # Adds, updates, or deletes records in multiple indices with a single API request. - Actions are applied in the order they are specified. - Actions are equivalent to the individual API requests of the same name.
1954
+ # Adds, updates, or deletes records in multiple indices with a single API request. - Actions are applied in the order they are specified. - Actions are equivalent to the individual API requests of the same name. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
1955
1955
 
1956
1956
  # @param batch_params [BatchParams] (required)
1957
1957
  # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
@@ -1981,7 +1981,7 @@ module Algolia
1981
1981
  @api_client.call_api(:POST, path, new_options)
1982
1982
  end
1983
1983
 
1984
- # Adds, updates, or deletes records in multiple indices with a single API request. - Actions are applied in the order they are specified. - Actions are equivalent to the individual API requests of the same name.
1984
+ # Adds, updates, or deletes records in multiple indices with a single API request. - Actions are applied in the order they are specified. - Actions are equivalent to the individual API requests of the same name. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
1985
1985
 
1986
1986
  # @param batch_params [BatchParams] (required)
1987
1987
  # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
@@ -1991,7 +1991,7 @@ module Algolia
1991
1991
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::MultipleBatchResponse")
1992
1992
  end
1993
1993
 
1994
- # Copies or moves (renames) an index within the same Algolia application. - Existing destination indices are overwritten, except for their analytics data. - If the destination index doesn't exist yet, it'll be created. **Copy** - Copying a source index that doesn't exist creates a new index with 0 records and default settings. - The API keys of the source index are merged with the existing keys in the destination index. - You can't copy the `enableReRanking`, `mode`, and `replicas` settings. - You can't copy to a destination index that already has replicas. - Be aware of the [size limits](https://www.algolia.com/doc/guides/scaling/algolia-service-limits/#application-record-and-index-limits). - Related guide: [Copy indices](https://www.algolia.com/doc/guides/sending-and-managing-data/manage-indices-and-apps/manage-indices/how-to/copy-indices/) **Move** - Moving a source index that doesn't exist is ignored without returning an error. - When moving an index, the analytics data keeps its original name, and a new set of analytics data is started for the new name. To access the original analytics in the dashboard, create an index with the original name. - If the destination index has replicas, moving will overwrite the existing index and copy the data to the replica indices. - Related guide: [Move indices](https://www.algolia.com/doc/guides/sending-and-managing-data/manage-indices-and-apps/manage-indices/how-to/move-indices/).
1994
+ # Copies or moves (renames) an index within the same Algolia application. - Existing destination indices are overwritten, except for their analytics data. - If the destination index doesn't exist yet, it'll be created. - This operation is resource-intensive. **Copy** - Copying a source index that doesn't exist creates a new index with 0 records and default settings. - The API keys of the source index are merged with the existing keys in the destination index. - You can't copy the `enableReRanking`, `mode`, and `replicas` settings. - You can't copy to a destination index that already has replicas. - Be aware of the [size limits](https://www.algolia.com/doc/guides/scaling/algolia-service-limits/#application-record-and-index-limits). - Related guide: [Copy indices](https://www.algolia.com/doc/guides/sending-and-managing-data/manage-indices-and-apps/manage-indices/how-to/copy-indices/) **Move** - Moving a source index that doesn't exist is ignored without returning an error. - When moving an index, the analytics data keeps its original name, and a new set of analytics data is started for the new name. To access the original analytics in the dashboard, create an index with the original name. - If the destination index has replicas, moving will overwrite the existing index and copy the data to the replica indices. - Related guide: [Move indices](https://www.algolia.com/doc/guides/sending-and-managing-data/manage-indices-and-apps/manage-indices/how-to/move-indices/). This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
1995
1995
  #
1996
1996
  # Required API Key ACLs:
1997
1997
  # - addObject
@@ -2028,7 +2028,7 @@ module Algolia
2028
2028
  @api_client.call_api(:POST, path, new_options)
2029
2029
  end
2030
2030
 
2031
- # Copies or moves (renames) an index within the same Algolia application. - Existing destination indices are overwritten, except for their analytics data. - If the destination index doesn't exist yet, it'll be created. **Copy** - Copying a source index that doesn't exist creates a new index with 0 records and default settings. - The API keys of the source index are merged with the existing keys in the destination index. - You can't copy the `enableReRanking`, `mode`, and `replicas` settings. - You can't copy to a destination index that already has replicas. - Be aware of the [size limits](https://www.algolia.com/doc/guides/scaling/algolia-service-limits/#application-record-and-index-limits). - Related guide: [Copy indices](https://www.algolia.com/doc/guides/sending-and-managing-data/manage-indices-and-apps/manage-indices/how-to/copy-indices/) **Move** - Moving a source index that doesn't exist is ignored without returning an error. - When moving an index, the analytics data keeps its original name, and a new set of analytics data is started for the new name. To access the original analytics in the dashboard, create an index with the original name. - If the destination index has replicas, moving will overwrite the existing index and copy the data to the replica indices. - Related guide: [Move indices](https://www.algolia.com/doc/guides/sending-and-managing-data/manage-indices-and-apps/manage-indices/how-to/move-indices/).
2031
+ # Copies or moves (renames) an index within the same Algolia application. - Existing destination indices are overwritten, except for their analytics data. - If the destination index doesn't exist yet, it'll be created. - This operation is resource-intensive. **Copy** - Copying a source index that doesn't exist creates a new index with 0 records and default settings. - The API keys of the source index are merged with the existing keys in the destination index. - You can't copy the `enableReRanking`, `mode`, and `replicas` settings. - You can't copy to a destination index that already has replicas. - Be aware of the [size limits](https://www.algolia.com/doc/guides/scaling/algolia-service-limits/#application-record-and-index-limits). - Related guide: [Copy indices](https://www.algolia.com/doc/guides/sending-and-managing-data/manage-indices-and-apps/manage-indices/how-to/copy-indices/) **Move** - Moving a source index that doesn't exist is ignored without returning an error. - When moving an index, the analytics data keeps its original name, and a new set of analytics data is started for the new name. To access the original analytics in the dashboard, create an index with the original name. - If the destination index has replicas, moving will overwrite the existing index and copy the data to the replica indices. - Related guide: [Move indices](https://www.algolia.com/doc/guides/sending-and-managing-data/manage-indices-and-apps/manage-indices/how-to/move-indices/). This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2032
2032
  #
2033
2033
  # Required API Key ACLs:
2034
2034
  # - addObject
@@ -2041,7 +2041,7 @@ module Algolia
2041
2041
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::UpdatedAtResponse")
2042
2042
  end
2043
2043
 
2044
- # Adds new attributes to a record, or updates existing ones. - If a record with the specified object ID doesn't exist, a new record is added to the index **if** `createIfNotExists` is true. - If the index doesn't exist yet, this method creates a new index. - You can use any first-level attribute but not nested attributes. If you specify a nested attribute, the engine treats it as a replacement for its first-level ancestor. To update an attribute without pushing the entire record, you can use these built-in operations. These operations can be helpful if you don't have access to your initial data. - Increment: increment a numeric attribute - Decrement: decrement a numeric attribute - Add: append a number or string element to an array attribute - Remove: remove all matching number or string elements from an array attribute made of numbers or strings - AddUnique: add a number or string element to an array attribute made of numbers or strings only if it's not already present - IncrementFrom: increment a numeric integer attribute only if the provided value matches the current value, and otherwise ignore the whole object update. For example, if you pass an IncrementFrom value of 2 for the version attribute, but the current value of the attribute is 1, the engine ignores the update. If the object doesn't exist, the engine only creates it if you pass an IncrementFrom value of 0. - IncrementSet: increment a numeric integer attribute only if the provided value is greater than the current value, and otherwise ignore the whole object update. For example, if you pass an IncrementSet value of 2 for the version attribute, and the current value of the attribute is 1, the engine updates the object. If the object doesn't exist yet, the engine only creates it if you pass an IncrementSet value greater than 0. 
You can specify an operation by providing an object with the attribute to update as the key and its value being an object with the following properties: - _operation: the operation to apply on the attribute - value: the right-hand side argument to the operation, for example, increment or decrement step, value to add or remove.
2044
+ # Adds new attributes to a record, or updates existing ones. - If a record with the specified object ID doesn't exist, a new record is added to the index **if** `createIfNotExists` is true. - If the index doesn't exist yet, this method creates a new index. - You can use any first-level attribute but not nested attributes. If you specify a nested attribute, this operation replaces its first-level ancestor. To update an attribute without pushing the entire record, you can use these built-in operations. These operations can be helpful if you don't have access to your initial data. - Increment: increment a numeric attribute - Decrement: decrement a numeric attribute - Add: append a number or string element to an array attribute - Remove: remove all matching number or string elements from an array attribute made of numbers or strings - AddUnique: add a number or string element to an array attribute made of numbers or strings only if it's not already present - IncrementFrom: increment a numeric integer attribute only if the provided value matches the current value, and otherwise ignore the whole object update. For example, if you pass an IncrementFrom value of 2 for the version attribute, but the current value of the attribute is 1, the engine ignores the update. If the object doesn't exist, the engine only creates it if you pass an IncrementFrom value of 0. - IncrementSet: increment a numeric integer attribute only if the provided value is greater than the current value, and otherwise ignore the whole object update. For example, if you pass an IncrementSet value of 2 for the version attribute, and the current value of the attribute is 1, the engine updates the object. If the object doesn't exist yet, the engine only creates it if you pass an IncrementSet value greater than 0. 
You can specify an operation by providing an object with the attribute to update as the key and its value being an object with the following properties: - _operation: the operation to apply on the attribute - value: the right-hand side argument to the operation, for example, increment or decrement step, value to add or remove. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2045
2045
  #
2046
2046
  # Required API Key ACLs:
2047
2047
  # - addObject
@@ -2093,7 +2093,7 @@ module Algolia
2093
2093
  @api_client.call_api(:POST, path, new_options)
2094
2094
  end
2095
2095
 
2096
- # Adds new attributes to a record, or updates existing ones. - If a record with the specified object ID doesn't exist, a new record is added to the index **if** `createIfNotExists` is true. - If the index doesn't exist yet, this method creates a new index. - You can use any first-level attribute but not nested attributes. If you specify a nested attribute, the engine treats it as a replacement for its first-level ancestor. To update an attribute without pushing the entire record, you can use these built-in operations. These operations can be helpful if you don't have access to your initial data. - Increment: increment a numeric attribute - Decrement: decrement a numeric attribute - Add: append a number or string element to an array attribute - Remove: remove all matching number or string elements from an array attribute made of numbers or strings - AddUnique: add a number or string element to an array attribute made of numbers or strings only if it's not already present - IncrementFrom: increment a numeric integer attribute only if the provided value matches the current value, and otherwise ignore the whole object update. For example, if you pass an IncrementFrom value of 2 for the version attribute, but the current value of the attribute is 1, the engine ignores the update. If the object doesn't exist, the engine only creates it if you pass an IncrementFrom value of 0. - IncrementSet: increment a numeric integer attribute only if the provided value is greater than the current value, and otherwise ignore the whole object update. For example, if you pass an IncrementSet value of 2 for the version attribute, and the current value of the attribute is 1, the engine updates the object. If the object doesn't exist yet, the engine only creates it if you pass an IncrementSet value greater than 0. 
You can specify an operation by providing an object with the attribute to update as the key and its value being an object with the following properties: - _operation: the operation to apply on the attribute - value: the right-hand side argument to the operation, for example, increment or decrement step, value to add or remove.
2096
+ # Adds new attributes to a record, or updates existing ones. - If a record with the specified object ID doesn't exist, a new record is added to the index **if** `createIfNotExists` is true. - If the index doesn't exist yet, this method creates a new index. - You can use any first-level attribute but not nested attributes. If you specify a nested attribute, this operation replaces its first-level ancestor. To update an attribute without pushing the entire record, you can use these built-in operations. These operations can be helpful if you don't have access to your initial data. - Increment: increment a numeric attribute - Decrement: decrement a numeric attribute - Add: append a number or string element to an array attribute - Remove: remove all matching number or string elements from an array attribute made of numbers or strings - AddUnique: add a number or string element to an array attribute made of numbers or strings only if it's not already present - IncrementFrom: increment a numeric integer attribute only if the provided value matches the current value, and otherwise ignore the whole object update. For example, if you pass an IncrementFrom value of 2 for the version attribute, but the current value of the attribute is 1, the engine ignores the update. If the object doesn't exist, the engine only creates it if you pass an IncrementFrom value of 0. - IncrementSet: increment a numeric integer attribute only if the provided value is greater than the current value, and otherwise ignore the whole object update. For example, if you pass an IncrementSet value of 2 for the version attribute, and the current value of the attribute is 1, the engine updates the object. If the object doesn't exist yet, the engine only creates it if you pass an IncrementSet value greater than 0. 
You can specify an operation by providing an object with the attribute to update as the key and its value being an object with the following properties: - _operation: the operation to apply on the attribute - value: the right-hand side argument to the operation, for example, increment or decrement step, value to add or remove. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2097
2097
  #
2098
2098
  # Required API Key ACLs:
2099
2099
  # - addObject
@@ -2255,7 +2255,7 @@ module Algolia
2255
2255
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::AddApiKeyResponse")
2256
2256
  end
2257
2257
 
2258
- # Adds a record to an index or replace it. - If the record doesn't have an object ID, a new record with an auto-generated object ID is added to your index. - If a record with the specified object ID exists, the existing record is replaced. - If a record with the specified object ID doesn't exist, a new record is added to your index. - If you add a record to an index that doesn't exist yet, a new index is created. To update _some_ attributes of a record, use the [`partial` operation](#tag/Records/operation/partialUpdateObject). To add, update, or replace multiple records, use the [`batch` operation](#tag/Records/operation/batch).
2258
+ # Adds a record to an index or replaces it. - If the record doesn't have an object ID, a new record with an auto-generated object ID is added to your index. - If a record with the specified object ID exists, the existing record is replaced. - If a record with the specified object ID doesn't exist, a new record is added to your index. - If you add a record to an index that doesn't exist yet, a new index is created. To update _some_ attributes of a record, use the [`partial` operation](#tag/Records/operation/partialUpdateObject). To add, update, or replace multiple records, use the [`batch` operation](#tag/Records/operation/batch). This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2259
2259
  #
2260
2260
  # Required API Key ACLs:
2261
2261
  # - addObject
@@ -2292,7 +2292,7 @@ module Algolia
2292
2292
  @api_client.call_api(:POST, path, new_options)
2293
2293
  end
2294
2294
 
2295
- # Adds a record to an index or replace it. - If the record doesn't have an object ID, a new record with an auto-generated object ID is added to your index. - If a record with the specified object ID exists, the existing record is replaced. - If a record with the specified object ID doesn't exist, a new record is added to your index. - If you add a record to an index that doesn't exist yet, a new index is created. To update _some_ attributes of a record, use the [`partial` operation](#tag/Records/operation/partialUpdateObject). To add, update, or replace multiple records, use the [`batch` operation](#tag/Records/operation/batch).
2295
+ # Adds a record to an index or replaces it. - If the record doesn't have an object ID, a new record with an auto-generated object ID is added to your index. - If a record with the specified object ID exists, the existing record is replaced. - If a record with the specified object ID doesn't exist, a new record is added to your index. - If you add a record to an index that doesn't exist yet, a new index is created. To update _some_ attributes of a record, use the [`partial` operation](#tag/Records/operation/partialUpdateObject). To add, update, or replace multiple records, use the [`batch` operation](#tag/Records/operation/batch). This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2296
2296
  #
2297
2297
  # Required API Key ACLs:
2298
2298
  # - addObject
@@ -2360,13 +2360,13 @@ module Algolia
2360
2360
  # @param rule [Rule] (required)
2361
2361
  # @param forward_to_replicas [Boolean] Whether changes are applied to replica indices.
2362
2362
  # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
2363
- # @return [UpdatedRuleResponse]
2363
+ # @return [UpdatedAtResponse]
2364
2364
  def save_rule(index_name, object_id, rule, forward_to_replicas = nil, request_options = {})
2365
2365
  response = save_rule_with_http_info(index_name, object_id, rule, forward_to_replicas, request_options)
2366
- @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::UpdatedRuleResponse")
2366
+ @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::UpdatedAtResponse")
2367
2367
  end
2368
2368
 
2369
- # Create or update multiple rules. If a rule with the specified object ID doesn't exist, Algolia creates a new one. Otherwise, existing rules are replaced.
2369
+ # Create or update multiple rules. If a rule with the specified object ID doesn't exist, Algolia creates a new one. Otherwise, existing rules are replaced. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2370
2370
  #
2371
2371
  # Required API Key ACLs:
2372
2372
  # - editSettings
@@ -2413,7 +2413,7 @@ module Algolia
2413
2413
  @api_client.call_api(:POST, path, new_options)
2414
2414
  end
2415
2415
 
2416
- # Create or update multiple rules. If a rule with the specified object ID doesn't exist, Algolia creates a new one. Otherwise, existing rules are replaced.
2416
+ # Create or update multiple rules. If a rule with the specified object ID doesn't exist, Algolia creates a new one. Otherwise, existing rules are replaced. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2417
2417
  #
2418
2418
  # Required API Key ACLs:
2419
2419
  # - editSettings
@@ -2495,7 +2495,7 @@ module Algolia
2495
2495
  @api_client.deserialize(response.body, request_options[:debug_return_type] || "Search::SaveSynonymResponse")
2496
2496
  end
2497
2497
 
2498
- # If a synonym with the `objectID` doesn't exist, Algolia adds a new one. Otherwise, existing synonyms are replaced.
2498
+ # If a synonym with the `objectID` doesn't exist, Algolia adds a new one. Otherwise, existing synonyms are replaced. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2499
2499
  #
2500
2500
  # Required API Key ACLs:
2501
2501
  # - editSettings
@@ -2542,7 +2542,7 @@ module Algolia
2542
2542
  @api_client.call_api(:POST, path, new_options)
2543
2543
  end
2544
2544
 
2545
- # If a synonym with the `objectID` doesn't exist, Algolia adds a new one. Otherwise, existing synonyms are replaced.
2545
+ # If a synonym with the `objectID` doesn't exist, Algolia adds a new one. Otherwise, existing synonyms are replaced. This operation is subject to [indexing rate limits](https://support.algolia.com/hc/en-us/articles/4406975251089-Is-there-a-rate-limit-for-indexing-on-Algolia).
2546
2546
  #
2547
2547
  # Required API Key ACLs:
2548
2548
  # - editSettings
@@ -3150,11 +3150,13 @@ module Algolia
3150
3150
  def wait_for_api_key(
3151
3151
  key,
3152
3152
  operation,
3153
- api_key = {},
3153
+ api_key = Search::ApiKey.new,
3154
3154
  max_retries = 50,
3155
3155
  timeout = -> (retry_count) { [retry_count * 200, 5000].min },
3156
3156
  request_options = {}
3157
3157
  )
3158
+ api_key = api_client.object_to_hash(api_key)
3159
+
3158
3160
  retries = 0
3159
3161
  if operation == "update"
3160
3162
  raise ArgumentError, "`api_key` is required when waiting for an `update` operation." if api_key.nil?
@@ -3162,7 +3164,7 @@ module Algolia
3162
3164
  updated_key = get_api_key(key, request_options)
3163
3165
  updated_key_hash = updated_key.to_hash
3164
3166
  equals = true
3165
- api_key.to_hash.each do |k, v|
3167
+ api_key.each do |k, v|
3166
3168
  equals &&= updated_key_hash[k] == v
3167
3169
  end
3168
3170
 
@@ -3201,7 +3203,9 @@ module Algolia
3201
3203
  # @param request_options [Hash] the requestOptions to send along with the query, they will be forwarded to the `browse` method.
3202
3204
  # @param block [Proc] the block to execute on each object of the index.
3203
3205
  def browse_objects(index_name, browse_params = Search::BrowseParamsObject.new, request_options = {}, &block)
3204
- browse_params[:hits_per_page] = browse_params[:hits_per_page] || 1000
3206
+ browse_params = api_client.object_to_hash(browse_params)
3207
+
3208
+ browse_params[:hitsPerPage] = 1000 unless browse_params.key?(:hitsPerPage)
3205
3209
 
3206
3210
  hits = []
3207
3211
  loop do
@@ -3214,8 +3218,8 @@ module Algolia
3214
3218
  hits.concat(res.hits)
3215
3219
  end
3216
3220
 
3217
- browse_params.cursor = res.cursor
3218
- break if browse_params.cursor.nil?
3221
+ browse_params[:cursor] = res.cursor
3222
+ break if browse_params[:cursor].nil?
3219
3223
  end
3220
3224
 
3221
3225
  hits unless block_given?
@@ -3227,12 +3231,11 @@ module Algolia
3227
3231
  # @param search_rules_params [SearchRulesParams] the parameters to send along with the query, they will be forwarded to the `searchRules` method.
3228
3232
  # @param request_options [Hash] the requestOptions to send along with the query, they will be forwarded to the `searchRules` method.
3229
3233
  # @param block [Proc] the block to execute on each rule of the index.
3230
- def browse_rules(
3231
- index_name,
3232
- search_rules_params = Search::SearchRulesParams.new(hits_per_page: 1000, page: 0),
3233
- request_options = {},
3234
- &block
3235
- )
3234
+ def browse_rules(index_name, search_rules_params = Search::SearchRulesParams.new, request_options = {}, &block)
3235
+ search_rules_params = api_client.object_to_hash(search_rules_params)
3236
+
3237
+ search_rules_params[:hitsPerPage] = 1000 unless search_rules_params.key?(:hitsPerPage)
3238
+
3236
3239
  rules = []
3237
3240
  loop do
3238
3241
  res = search_rules(index_name, search_rules_params, request_options)
@@ -3244,8 +3247,8 @@ module Algolia
3244
3247
  rules.concat(res.hits)
3245
3248
  end
3246
3249
 
3247
- search_rules_params.page += 1
3248
- break if res.hits.length < search_rules_params.hits_per_page
3250
+ search_rules_params[:page] += 1
3251
+ break if res.hits.length < search_rules_params[:hitsPerPage]
3249
3252
  end
3250
3253
 
3251
3254
  rules unless block_given?
@@ -3259,10 +3262,14 @@ module Algolia
3259
3262
  # @param block [Proc] the block to execute on each synonym of the index.
3260
3263
  def browse_synonyms(
3261
3264
  index_name,
3262
- search_synonyms_params = Search::SearchSynonymsParams.new(hits_per_page: 1000, page: 0),
3265
+ search_synonyms_params = Search::SearchSynonymsParams.new,
3263
3266
  request_options = {},
3264
3267
  &block
3265
3268
  )
3269
+ search_synonyms_params = api_client.object_to_hash(search_synonyms_params)
3270
+
3271
+ search_synonyms_params[:hitsPerPage] = 1000 unless search_synonyms_params.key?(:hitsPerPage)
3272
+
3266
3273
  synonyms = []
3267
3274
  loop do
3268
3275
  res = search_synonyms(index_name, search_synonyms_params, request_options)
@@ -3274,8 +3281,8 @@ module Algolia
3274
3281
  synonyms.concat(res.hits)
3275
3282
  end
3276
3283
 
3277
- search_synonyms_params.page += 1
3278
- break if res.hits.length < search_synonyms_params.hits_per_page
3284
+ search_synonyms_params[:page] += 1
3285
+ break if res.hits.length < search_synonyms_params[:hitsPerPage]
3279
3286
  end
3280
3287
 
3281
3288
  synonyms unless block_given?
@@ -3288,7 +3295,7 @@ module Algolia
3288
3295
  #
3289
3296
  # @return [String]
3290
3297
  #
3291
- def generate_secured_api_key(parent_api_key, restrictions = {})
3298
+ def self.generate_secured_api_key(parent_api_key, restrictions = {})
3292
3299
  restrictions = restrictions.to_hash
3293
3300
  if restrictions.key?(:searchParams)
3294
3301
  # merge searchParams with the root of the restrictions
@@ -3310,13 +3317,24 @@ module Algolia
3310
3317
  Base64.encode64("#{hmac}#{url_encoded_restrictions}").gsub("\n", "")
3311
3318
  end
3312
3319
 
3320
+ # Helper: Generates a secured API key based on the given `parent_api_key` and given `restrictions`.
3321
+ #
3322
+ # @param parent_api_key [String] Parent API key used to generate the secured key
3323
+ # @param restrictions [SecuredApiKeyRestrictions] Restrictions to apply on the secured key
3324
+ #
3325
+ # @return [String]
3326
+ #
3327
+ def generate_secured_api_key(parent_api_key, restrictions = {})
3328
+ self.class.generate_secured_api_key(parent_api_key, restrictions)
3329
+ end
3330
+
3313
3331
  # Helper: Retrieves the remaining validity of the previously generated `secured_api_key`, the `validUntil` parameter must have been provided.
3314
3332
  #
3315
3333
  # @param secured_api_key [String]
3316
3334
  #
3317
3335
  # @return [Integer]
3318
3336
  #
3319
- def get_secured_api_key_remaining_validity(secured_api_key)
3337
+ def self.get_secured_api_key_remaining_validity(secured_api_key)
3320
3338
  now = Time.now.to_i
3321
3339
  decoded_key = Base64.decode64(secured_api_key)
3322
3340
  regex = "validUntil=(\\d+)"
@@ -3331,22 +3349,33 @@ module Algolia
3331
3349
  valid_until - now
3332
3350
  end
3333
3351
 
3352
+ # Helper: Retrieves the remaining validity of the previously generated `secured_api_key`, the `validUntil` parameter must have been provided.
3353
+ #
3354
+ # @param secured_api_key [String]
3355
+ #
3356
+ # @return [Integer]
3357
+ #
3358
+ def get_secured_api_key_remaining_validity(secured_api_key)
3359
+ self.class.get_secured_api_key_remaining_validity(secured_api_key)
3360
+ end
3361
+
3334
3362
  # Helper: Saves the given array of objects in the given index. The `chunked_batch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
3335
3363
  #
3336
3364
  # @param index_name [String]: The `index_name` to save `objects` in.
3337
3365
  # @param objects [Array]: The array of `objects` to store in the given Algolia `indexName`.
3338
3366
  # @param wait_for_tasks [Boolean]: Whether or not we should wait until every `batch` task has been processed, this operation may slow the total execution time of this method but is more reliable.
3367
+ # @param batch_size [Integer] The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
3339
3368
  # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
3340
3369
  #
3341
3370
  # @return [BatchResponse]
3342
3371
  #
3343
- def save_objects(index_name, objects, wait_for_tasks = false, request_options = {})
3372
+ def save_objects(index_name, objects, wait_for_tasks = false, batch_size = 1000, request_options = {})
3344
3373
  chunked_batch(
3345
3374
  index_name,
3346
3375
  objects,
3347
3376
  Search::Action::ADD_OBJECT,
3348
3377
  wait_for_tasks,
3349
- 1000,
3378
+ batch_size,
3350
3379
  request_options
3351
3380
  )
3352
3381
  end
@@ -3356,17 +3385,18 @@ module Algolia
3356
3385
  # @param index_name [String]: The `index_name` to delete `object_ids` from.
3357
3386
  # @param object_ids [Array]: The object_ids to delete.
3358
3387
  # @param wait_for_tasks [Boolean]: Whether or not we should wait until every `batch` task has been processed, this operation may slow the total execution time of this method but is more reliable.
3388
+ # @param batch_size [Integer] The size of the chunk of `object_ids`. The number of `batch` calls will be equal to `length(object_ids) / batchSize`. Defaults to 1000.
3359
3389
  # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
3360
3390
  #
3361
3391
  # @return [BatchResponse]
3362
3392
  #
3363
- def delete_objects(index_name, object_ids, wait_for_tasks = false, request_options = {})
3393
+ def delete_objects(index_name, object_ids, wait_for_tasks = false, batch_size = 1000, request_options = {})
3364
3394
  chunked_batch(
3365
3395
  index_name,
3366
3396
  object_ids.map { |id| {"objectID" => id} },
3367
3397
  Search::Action::DELETE_OBJECT,
3368
3398
  wait_for_tasks,
3369
- 1000,
3399
+ batch_size,
3370
3400
  request_options
3371
3401
  )
3372
3402
  end
@@ -3377,17 +3407,25 @@ module Algolia
3377
3407
  # @param objects [Array]: The objects to partially update.
3378
3408
  # @param create_if_not_exists [Boolean]: To be provided if non-existing objects are passed, otherwise, the call will fail.
3379
3409
  # @param wait_for_tasks [Boolean] Whether or not we should wait until every `batch` task has been processed, this operation may slow the total execution time of this method but is more reliable.
3410
+ # @param batch_size [Integer] The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
3380
3411
  # @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
3381
3412
  #
3382
3413
  # @return [BatchResponse]
3383
3414
  #
3384
- def partial_update_objects(index_name, objects, create_if_not_exists, wait_for_tasks = false, request_options = {})
3415
+ def partial_update_objects(
3416
+ index_name,
3417
+ objects,
3418
+ create_if_not_exists,
3419
+ wait_for_tasks = false,
3420
+ batch_size = 1000,
3421
+ request_options = {}
3422
+ )
3385
3423
  chunked_batch(
3386
3424
  index_name,
3387
3425
  objects,
3388
3426
  create_if_not_exists ? Search::Action::PARTIAL_UPDATE_OBJECT : Search::Action::PARTIAL_UPDATE_OBJECT_NO_CREATE,
3389
3427
  wait_for_tasks,
3390
- 1000,
3428
+ batch_size,
3391
3429
  request_options
3392
3430
  )
3393
3431
  end
@@ -3502,10 +3540,12 @@ module Algolia
3502
3540
  def index_exists?(index_name)
3503
3541
  begin
3504
3542
  get_settings(index_name)
3505
- rescue AlgoliaHttpError => e
3506
- return false if e.code == 404
3543
+ rescue Exception => e
3544
+ if e.is_a?(AlgoliaHttpError)
3545
+ return false if e.code == 404
3507
3546
 
3508
- raise e
3547
+ raise e
3548
+ end
3509
3549
  end
3510
3550
 
3511
3551
  true
@@ -25,9 +25,9 @@ module Algolia
25
25
  @app_id = app_id
26
26
  @api_key = api_key
27
27
  @client_side_validation = opts[:client_side_validation].nil? ? true : opts[:client_side_validation]
28
- @write_timeout = opts[:write_timeout] || 30_000
29
- @read_timeout = opts[:read_timeout] || 5_000
30
28
  @connect_timeout = opts[:connect_timeout] || 2_000
29
+ @read_timeout = opts[:read_timeout] || 5_000
30
+ @write_timeout = opts[:write_timeout] || 30_000
31
31
  @compression_type = opts[:compression_type] || "none"
32
32
  @requester = opts[:requester]
33
33
 
@@ -14,22 +14,10 @@ module Algolia
14
14
  GA4_BIGQUERY_EXPORT = "ga4BigqueryExport".freeze
15
15
  JSON = "json".freeze
16
16
  SHOPIFY = "shopify".freeze
17
- SFCC = "sfcc".freeze
18
17
  PUSH = "push".freeze
19
18
 
20
19
  def self.all_vars
21
- @all_vars ||= [
22
- BIGCOMMERCE,
23
- BIGQUERY,
24
- COMMERCETOOLS,
25
- CSV,
26
- DOCKER,
27
- GA4_BIGQUERY_EXPORT,
28
- JSON,
29
- SHOPIFY,
30
- SFCC,
31
- PUSH
32
- ].freeze
20
+ @all_vars ||= [BIGCOMMERCE, BIGQUERY, COMMERCETOOLS, CSV, DOCKER, GA4_BIGQUERY_EXPORT, JSON, SHOPIFY, PUSH].freeze
33
21
  end
34
22
 
35
23
  # Builds the enum from string
@@ -5,11 +5,11 @@ require "time"
5
5
 
6
6
  module Algolia
7
7
  module Ingestion
8
- class SourceWatchResponse
8
+ class WatchResponse
9
9
  # Universally unique identifier (UUID) of a task run.
10
10
  attr_accessor :run_id
11
11
 
12
- # depending on the source type, the validation returns sampling data of your source (JSON, CSV, BigQuery).
12
+ # when used with discovering or validating sources, the sampled data of your source is returned.
13
13
  attr_accessor :data
14
14
 
15
15
  # in case of error, observability events will be added to the response, if any.
@@ -56,7 +56,7 @@ module Algolia
56
56
  if (!attributes.is_a?(Hash))
57
57
  raise(
58
58
  ArgumentError,
59
- "The input argument (attributes) must be a hash in `Algolia::SourceWatchResponse` initialize method"
59
+ "The input argument (attributes) must be a hash in `Algolia::WatchResponse` initialize method"
60
60
  )
61
61
  end
62
62
 
@@ -65,7 +65,7 @@ module Algolia
65
65
  if (!self.class.attribute_map.key?(k.to_sym))
66
66
  raise(
67
67
  ArgumentError,
68
- "`#{k}` is not a valid attribute in `Algolia::SourceWatchResponse`. Please check the name to make sure it's valid. List of attributes: " +
68
+ "`#{k}` is not a valid attribute in `Algolia::WatchResponse`. Please check the name to make sure it's valid. List of attributes: " +
69
69
  self.class.attribute_map.keys.inspect
70
70
  )
71
71
  end
@@ -9,9 +9,10 @@ module Algolia
9
9
  IGNORE_PLURALS = "ignorePlurals".freeze
10
10
  SINGLE_WORD_SYNONYM = "singleWordSynonym".freeze
11
11
  MULTI_WORDS_SYNONYM = "multiWordsSynonym".freeze
12
+ IGNORE_CONJUGATIONS = "ignoreConjugations".freeze
12
13
 
13
14
  def self.all_vars
14
- @all_vars ||= [IGNORE_PLURALS, SINGLE_WORD_SYNONYM, MULTI_WORDS_SYNONYM].freeze
15
+ @all_vars ||= [IGNORE_PLURALS, SINGLE_WORD_SYNONYM, MULTI_WORDS_SYNONYM, IGNORE_CONJUGATIONS].freeze
15
16
  end
16
17
 
17
18
  # Builds the enum from string
@@ -9,9 +9,10 @@ module Algolia
9
9
  IGNORE_PLURALS = "ignorePlurals".freeze
10
10
  SINGLE_WORD_SYNONYM = "singleWordSynonym".freeze
11
11
  MULTI_WORDS_SYNONYM = "multiWordsSynonym".freeze
12
+ IGNORE_CONJUGATIONS = "ignoreConjugations".freeze
12
13
 
13
14
  def self.all_vars
14
- @all_vars ||= [IGNORE_PLURALS, SINGLE_WORD_SYNONYM, MULTI_WORDS_SYNONYM].freeze
15
+ @all_vars ||= [IGNORE_PLURALS, SINGLE_WORD_SYNONYM, MULTI_WORDS_SYNONYM, IGNORE_CONJUGATIONS].freeze
15
16
  end
16
17
 
17
18
  # Builds the enum from string
@@ -181,8 +181,6 @@ module Algolia
181
181
 
182
182
  if attributes.key?(:nb_api_calls)
183
183
  self.nb_api_calls = attributes[:nb_api_calls]
184
- else
185
- self.nb_api_calls = nil
186
184
  end
187
185
 
188
186
  if attributes.key?(:processing_time_ms)
@@ -94,6 +94,8 @@ module Algolia
94
94
 
95
95
  if attributes.key?(:consequence)
96
96
  self.consequence = attributes[:consequence]
97
+ else
98
+ self.consequence = nil
97
99
  end
98
100
 
99
101
  if attributes.key?(:description)
@@ -1,5 +1,5 @@
1
1
  # Code generated by OpenAPI Generator (https://openapi-generator.tech), manual changes will be lost - read more on https://github.com/algolia/api-clients-automation. DO NOT EDIT.
2
2
 
3
3
  module Algolia
4
- VERSION = "3.8.2".freeze
4
+ VERSION = "3.10.2".freeze
5
5
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: algolia
3
3
  version: !ruby/object:Gem::Version
4
- version: 3.8.2
4
+ version: 3.10.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - https://alg.li/support
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2024-11-19 00:00:00.000000000 Z
11
+ date: 2024-12-18 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: faraday
@@ -90,14 +90,14 @@ dependencies:
90
90
  requirements:
91
91
  - - ">="
92
92
  - !ruby/object:Gem::Version
93
- version: '0'
93
+ version: 2.4.10
94
94
  type: :development
95
95
  prerelease: false
96
96
  version_requirements: !ruby/object:Gem::Requirement
97
97
  requirements:
98
98
  - - ">="
99
99
  - !ruby/object:Gem::Version
100
- version: '0'
100
+ version: 2.4.10
101
101
  - !ruby/object:Gem::Dependency
102
102
  name: rake
103
103
  requirement: !ruby/object:Gem::Requirement
@@ -337,7 +337,6 @@ files:
337
337
  - lib/algolia/models/ingestion/source_update_input.rb
338
338
  - lib/algolia/models/ingestion/source_update_response.rb
339
339
  - lib/algolia/models/ingestion/source_update_shopify.rb
340
- - lib/algolia/models/ingestion/source_watch_response.rb
341
340
  - lib/algolia/models/ingestion/streaming_input.rb
342
341
  - lib/algolia/models/ingestion/streaming_trigger.rb
343
342
  - lib/algolia/models/ingestion/streaming_trigger_type.rb
@@ -367,6 +366,7 @@ files:
367
366
  - lib/algolia/models/ingestion/trigger.rb
368
367
  - lib/algolia/models/ingestion/trigger_type.rb
369
368
  - lib/algolia/models/ingestion/trigger_update_input.rb
369
+ - lib/algolia/models/ingestion/watch_response.rb
370
370
  - lib/algolia/models/ingestion/window.rb
371
371
  - lib/algolia/models/insights/add_to_cart_event.rb
372
372
  - lib/algolia/models/insights/added_to_cart_object_ids.rb
@@ -677,7 +677,6 @@ files:
677
677
  - lib/algolia/models/search/update_api_key_response.rb
678
678
  - lib/algolia/models/search/updated_at_response.rb
679
679
  - lib/algolia/models/search/updated_at_with_object_id_response.rb
680
- - lib/algolia/models/search/updated_rule_response.rb
681
680
  - lib/algolia/models/search/user_highlight_result.rb
682
681
  - lib/algolia/models/search/user_hit.rb
683
682
  - lib/algolia/models/search/user_id.rb
@@ -716,7 +715,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
716
715
  - !ruby/object:Gem::Version
717
716
  version: '0'
718
717
  requirements: []
719
- rubygems_version: 3.4.10
718
+ rubygems_version: 3.5.22
720
719
  signing_key:
721
720
  specification_version: 4
722
721
  summary: A simple Ruby client for the algolia.com REST API