diff --git a/apis/connect/2017-08-08/api-2.json b/apis/connect/2017-08-08/api-2.json index f2cba50d981..1f1865f7b1c 100644 --- a/apis/connect/2017-08-08/api-2.json +++ b/apis/connect/2017-08-08/api-2.json @@ -531,6 +531,24 @@ {"shape":"InternalServiceException"} ] }, + "CreateHoursOfOperationOverride":{ + "name":"CreateHoursOfOperationOverride", + "http":{ + "method":"PUT", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides" + }, + "input":{"shape":"CreateHoursOfOperationOverrideRequest"}, + "output":{"shape":"CreateHoursOfOperationOverrideResponse"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ] + }, "CreateInstance":{ "name":"CreateInstance", "http":{ @@ -1015,6 +1033,21 @@ {"shape":"InternalServiceException"} ] }, + "DeleteHoursOfOperationOverride":{ + "name":"DeleteHoursOfOperationOverride", + "http":{ + "method":"DELETE", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + }, + "input":{"shape":"DeleteHoursOfOperationOverrideRequest"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ] + }, "DeleteInstance":{ "name":"DeleteInstance", "http":{ @@ -1444,6 +1477,22 @@ {"shape":"InternalServiceException"} ] }, + "DescribeHoursOfOperationOverride":{ + "name":"DescribeHoursOfOperationOverride", + "http":{ + "method":"GET", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + }, + "input":{"shape":"DescribeHoursOfOperationOverrideRequest"}, + "output":{"shape":"DescribeHoursOfOperationOverrideResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ] + }, "DescribeInstance":{ "name":"DescribeInstance", "http":{ @@ -1993,6 +2042,22 @@ {"shape":"ResourceNotFoundException"} ] }, + "GetEffectiveHoursOfOperations":{ + "name":"GetEffectiveHoursOfOperations", + "http":{ + "method":"GET", + "requestUri":"/effective-hours-of-operations/{InstanceId}/{HoursOfOperationId}" + }, + "input":{"shape":"GetEffectiveHoursOfOperationsRequest"}, + "output":{"shape":"GetEffectiveHoursOfOperationsResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ] + }, "GetFederationToken":{ "name":"GetFederationToken", "http":{ @@ -2362,6 +2427,22 @@ {"shape":"ThrottlingException"} ] }, + "ListHoursOfOperationOverrides":{ + "name":"ListHoursOfOperationOverrides", + "http":{ + "method":"GET", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides" + }, + "input":{"shape":"ListHoursOfOperationOverridesRequest"}, + "output":{"shape":"ListHoursOfOperationOverridesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ] + }, "ListHoursOfOperations":{ 
"name":"ListHoursOfOperations", "http":{ @@ -3089,6 +3170,22 @@ {"shape":"InternalServiceException"} ] }, + "SearchHoursOfOperationOverrides":{ + "name":"SearchHoursOfOperationOverrides", + "http":{ + "method":"POST", + "requestUri":"/search-hours-of-operation-overrides" + }, + "input":{"shape":"SearchHoursOfOperationOverridesRequest"}, + "output":{"shape":"SearchHoursOfOperationOverridesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"} + ] + }, "SearchHoursOfOperations":{ "name":"SearchHoursOfOperations", "http":{ @@ -3913,6 +4010,23 @@ {"shape":"InternalServiceException"} ] }, + "UpdateHoursOfOperationOverride":{ + "name":"UpdateHoursOfOperationOverride", + "http":{ + "method":"POST", + "requestUri":"/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + }, + "input":{"shape":"UpdateHoursOfOperationOverrideRequest"}, + "errors":[ + {"shape":"DuplicateResourceException"}, + {"shape":"InvalidRequestException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServiceException"}, + {"shape":"ConditionalOperationFailedException"} + ] + }, "UpdateInstanceAttribute":{ "name":"UpdateInstanceAttribute", "http":{ @@ -5652,6 +5766,14 @@ "type":"list", "member":{"shape":"CommonAttributeAndCondition"} }, + "CommonHumanReadableDescription":{ + "type":"string", + "pattern":"^[\\P{C}\\r\\n\\t]{1,250}$" + }, + "CommonHumanReadableName":{ + "type":"string", + "pattern":"^[\\P{C}\\r\\n\\t]{1,127}$" + }, "CommonNameLength127":{ "type":"string", "max":127, @@ -5883,7 +6005,9 @@ "members":{ "OrConditions":{"shape":"ContactFlowModuleSearchConditionList"}, "AndConditions":{"shape":"ContactFlowModuleSearchConditionList"}, - "StringCondition":{"shape":"StringCondition"} + "StringCondition":{"shape":"StringCondition"}, + "StateCondition":{"shape":"ContactFlowModuleState"}, + "StatusCondition":{"shape":"ContactFlowModuleStatus"} } }, "ContactFlowModuleSearchFilter":{ @@ -6410,6 +6534,40 @@ "EvaluationFormArn":{"shape":"ARN"} } }, + "CreateHoursOfOperationOverrideRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "Name", + "Config", + "EffectiveFrom", + "EffectiveTill" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "Name":{"shape":"CommonHumanReadableName"}, + "Description":{"shape":"CommonHumanReadableDescription"}, + "Config":{"shape":"HoursOfOperationOverrideConfigList"}, + "EffectiveFrom":{"shape":"HoursOfOperationOverrideYearMonthDayDateFormat"}, + "EffectiveTill":{"shape":"HoursOfOperationOverrideYearMonthDayDateFormat"} + } + }, + "CreateHoursOfOperationOverrideResponse":{ + "type":"structure", + "members":{ + "HoursOfOperationOverrideId":{"shape":"HoursOfOperationOverrideId"} + } + }, "CreateHoursOfOperationRequest":{ "type":"structure", "required":[ @@ -7145,6 +7303,24 @@ "type":"list", "member":{"shape":"DataSetId"} }, + "DateComparisonType":{ + "type":"string", + "enum":[ + "GREATER_THAN", + "LESS_THAN", + "GREATER_THAN_OR_EQUAL_TO", + "LESS_THAN_OR_EQUAL_TO", + "EQUAL_TO" + ] + }, + "DateCondition":{ + "type":"structure", + "members":{ + "FieldName":{"shape":"String"}, + 
"Value":{"shape":"DateYearMonthDayFormat"}, + "ComparisonType":{"shape":"DateComparisonType"} + } + }, "DateReference":{ "type":"structure", "members":{ @@ -7152,6 +7328,10 @@ "Value":{"shape":"ReferenceValue"} } }, + "DateYearMonthDayFormat":{ + "type":"string", + "pattern":"^\\d{4}-\\d{2}-\\d{2}$" + }, "DeactivateEvaluationFormRequest":{ "type":"structure", "required":[ @@ -7356,6 +7536,31 @@ } } }, + "DeleteHoursOfOperationOverrideRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "HoursOfOperationOverrideId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "HoursOfOperationOverrideId":{ + "shape":"HoursOfOperationOverrideId", + "location":"uri", + "locationName":"HoursOfOperationOverrideId" + } + } + }, "DeleteHoursOfOperationRequest":{ "type":"structure", "required":[ @@ -7976,6 +8181,37 @@ "EvaluationForm":{"shape":"EvaluationForm"} } }, + "DescribeHoursOfOperationOverrideRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "HoursOfOperationOverrideId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "HoursOfOperationOverrideId":{ + "shape":"HoursOfOperationOverrideId", + "location":"uri", + "locationName":"HoursOfOperationOverrideId" + } + } + }, + "DescribeHoursOfOperationOverrideResponse":{ + "type":"structure", + "members":{ + "HoursOfOperationOverride":{"shape":"HoursOfOperationOverride"} + } + }, "DescribeHoursOfOperationRequest":{ "type":"structure", "required":[ @@ -8841,6 +9077,17 @@ "exception":true }, "DurationInSeconds":{"type":"integer"}, + "EffectiveHoursOfOperationList":{ + "type":"list", + "member":{"shape":"EffectiveHoursOfOperations"} + }, + "EffectiveHoursOfOperations":{ + "type":"structure", + "members":{ + "Date":{"shape":"HoursOfOperationOverrideYearMonthDayDateFormat"}, + "OperationalHours":{"shape":"OperationalHours"} + } + }, "Email":{ "type":"string", "sensitive":true @@ -9900,6 +10147,44 @@ "ApproximateTotalCount":{"shape":"ApproximateTotalCount"} } }, + "GetEffectiveHoursOfOperationsRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "FromDate", + "ToDate" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "FromDate":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "location":"querystring", + "locationName":"fromDate" + }, + "ToDate":{ + "shape":"HoursOfOperationOverrideYearMonthDayDateFormat", + "location":"querystring", + "locationName":"toDate" + } + } + }, + "GetEffectiveHoursOfOperationsResponse":{ + "type":"structure", + "members":{ + "EffectiveHoursOfOperationList":{"shape":"EffectiveHoursOfOperationList"}, + "TimeZone":{"shape":"TimeZone"} + } + }, "GetFederationTokenRequest":{ "type":"structure", "required":["InstanceId"], @@ -10410,6 +10695,59 @@ "member":{"shape":"HoursOfOperation"} }, "HoursOfOperationName":{"type":"string"}, + "HoursOfOperationOverride":{ + "type":"structure", + "members":{ + "HoursOfOperationOverrideId":{"shape":"HoursOfOperationOverrideId"}, + 
"HoursOfOperationId":{"shape":"HoursOfOperationId"}, + "HoursOfOperationArn":{"shape":"ARN"}, + "Name":{"shape":"CommonHumanReadableName"}, + "Description":{"shape":"CommonHumanReadableDescription"}, + "Config":{"shape":"HoursOfOperationOverrideConfigList"}, + "EffectiveFrom":{"shape":"HoursOfOperationOverrideYearMonthDayDateFormat"}, + "EffectiveTill":{"shape":"HoursOfOperationOverrideYearMonthDayDateFormat"} + } + }, + "HoursOfOperationOverrideConfig":{ + "type":"structure", + "members":{ + "Day":{"shape":"OverrideDays"}, + "StartTime":{"shape":"OverrideTimeSlice"}, + "EndTime":{"shape":"OverrideTimeSlice"} + } + }, + "HoursOfOperationOverrideConfigList":{ + "type":"list", + "member":{"shape":"HoursOfOperationOverrideConfig"}, + "max":100, + "min":0 + }, + "HoursOfOperationOverrideId":{ + "type":"string", + "max":36, + "min":1 + }, + "HoursOfOperationOverrideList":{ + "type":"list", + "member":{"shape":"HoursOfOperationOverride"} + }, + "HoursOfOperationOverrideSearchConditionList":{ + "type":"list", + "member":{"shape":"HoursOfOperationOverrideSearchCriteria"} + }, + "HoursOfOperationOverrideSearchCriteria":{ + "type":"structure", + "members":{ + "OrConditions":{"shape":"HoursOfOperationOverrideSearchConditionList"}, + "AndConditions":{"shape":"HoursOfOperationOverrideSearchConditionList"}, + "StringCondition":{"shape":"StringCondition"}, + "DateCondition":{"shape":"DateCondition"} + } + }, + "HoursOfOperationOverrideYearMonthDayDateFormat":{ + "type":"string", + "pattern":"^\\d{4}-\\d{2}-\\d{2}$" + }, "HoursOfOperationSearchConditionList":{ "type":"list", "member":{"shape":"HoursOfOperationSearchCriteria"} @@ -11417,6 +11755,45 @@ "NextToken":{"shape":"NextToken"} } }, + "ListHoursOfOperationOverridesRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "NextToken":{ + "shape":"NextToken", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResult100", + "box":true, + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListHoursOfOperationOverridesResponse":{ + "type":"structure", + "members":{ + "NextToken":{"shape":"NextToken"}, + "HoursOfOperationOverrideList":{"shape":"HoursOfOperationOverrideList"}, + "LastModifiedRegion":{"shape":"RegionName"}, + "LastModifiedTime":{"shape":"Timestamp"} + } + }, "ListHoursOfOperationsRequest":{ "type":"structure", "required":["InstanceId"], @@ -12809,6 +13186,17 @@ "max":128, "min":0 }, + "OperationalHour":{ + "type":"structure", + "members":{ + "Start":{"shape":"OverrideTimeSlice"}, + "End":{"shape":"OverrideTimeSlice"} + } + }, + "OperationalHours":{ + "type":"list", + "member":{"shape":"OperationalHour"} + }, "Origin":{ "type":"string", "max":267 @@ -12899,6 +13287,35 @@ "error":{"httpStatusCode":404}, "exception":true }, + "OverrideDays":{ + "type":"string", + "enum":[ + "SUNDAY", + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY" + ] + }, + "OverrideTimeSlice":{ + "type":"structure", + "required":[ + "Hours", + "Minutes" + ], + "members":{ + "Hours":{ + "shape":"Hours24Format", + "box":true + }, + "Minutes":{ + "shape":"MinutesLimit60", + "box":true + } + } + }, "PEM":{ "type":"string", "max":1024, @@ -14975,6 +15392,28 @@ "ApproximateTotalCount":{"shape":"ApproximateTotalCount"} } }, + 
"SearchHoursOfOperationOverridesRequest":{ + "type":"structure", + "required":["InstanceId"], + "members":{ + "InstanceId":{"shape":"InstanceId"}, + "NextToken":{"shape":"NextToken2500"}, + "MaxResults":{ + "shape":"MaxResult100", + "box":true + }, + "SearchFilter":{"shape":"HoursOfOperationSearchFilter"}, + "SearchCriteria":{"shape":"HoursOfOperationOverrideSearchCriteria"} + } + }, + "SearchHoursOfOperationOverridesResponse":{ + "type":"structure", + "members":{ + "HoursOfOperationOverrides":{"shape":"HoursOfOperationOverrideList"}, + "NextToken":{"shape":"NextToken2500"}, + "ApproximateTotalCount":{"shape":"ApproximateTotalCount"} + } + }, "SearchHoursOfOperationsRequest":{ "type":"structure", "required":["InstanceId"], @@ -17166,6 +17605,36 @@ "max":250, "min":0 }, + "UpdateHoursOfOperationOverrideRequest":{ + "type":"structure", + "required":[ + "InstanceId", + "HoursOfOperationId", + "HoursOfOperationOverrideId" + ], + "members":{ + "InstanceId":{ + "shape":"InstanceId", + "location":"uri", + "locationName":"InstanceId" + }, + "HoursOfOperationId":{ + "shape":"HoursOfOperationId", + "location":"uri", + "locationName":"HoursOfOperationId" + }, + "HoursOfOperationOverrideId":{ + "shape":"HoursOfOperationOverrideId", + "location":"uri", + "locationName":"HoursOfOperationOverrideId" + }, + "Name":{"shape":"CommonHumanReadableName"}, + "Description":{"shape":"CommonHumanReadableDescription"}, + "Config":{"shape":"HoursOfOperationOverrideConfigList"}, + "EffectiveFrom":{"shape":"HoursOfOperationOverrideYearMonthDayDateFormat"}, + "EffectiveTill":{"shape":"HoursOfOperationOverrideYearMonthDayDateFormat"} + } + }, "UpdateHoursOfOperationRequest":{ "type":"structure", "required":[ diff --git a/apis/connect/2017-08-08/docs-2.json b/apis/connect/2017-08-08/docs-2.json index 8efe6e54952..3518189ddbb 100644 --- a/apis/connect/2017-08-08/docs-2.json +++ b/apis/connect/2017-08-08/docs-2.json @@ -32,6 +32,7 @@ "CreateEmailAddress": "

Create new email address in the specified Amazon Connect instance. For more information about email addresses, see Create email addresses in the Amazon Connect Administrator Guide.

", "CreateEvaluationForm": "

Creates an evaluation form in the specified Amazon Connect instance. The form can be used to define questions related to agent performance, and create sections to organize such questions. Question and section identifiers cannot be duplicated within the same evaluation form.

", "CreateHoursOfOperation": "

This API is in preview release for Amazon Connect and is subject to change.

Creates hours of operation.

", + "CreateHoursOfOperationOverride": "

Creates an hours of operation override in an Amazon Connect hours of operation resource.

", "CreateInstance": "

This API is in preview release for Amazon Connect and is subject to change.

Initiates an Amazon Connect instance with all the supported channels enabled. It does not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It also does not allow for any configurations on features, such as Contact Lens for Amazon Connect.

For more information, see Create an Amazon Connect instance in the Amazon Connect Administrator Guide.

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. You must wait 30 days before you can restart creating and deleting instances in your account.

", "CreateIntegrationAssociation": "

Creates an Amazon Web Services resource association with an Amazon Connect instance.

", "CreateParticipant": "

Adds a new participant into an on-going chat contact. For more information, see Customize chat flow experiences by integrating custom participants.

", @@ -39,7 +40,7 @@ "CreatePredefinedAttribute": "

Creates a new predefined attribute for the specified Amazon Connect instance. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents.

", "CreatePrompt": "

Creates a prompt. For more information about prompts, such as supported file types and maximum length, see Create prompts in the Amazon Connect Administrator Guide.

", "CreatePushNotificationRegistration": "

Creates registration for a device token and a chat contact to receive real-time push notifications. For more information about push notifications, see Set up push notifications in Amazon Connect for mobile chat in the Amazon Connect Administrator Guide.

", - "CreateQueue": "

This API is in preview release for Amazon Connect and is subject to change.

Creates a new queue for the specified Amazon Connect instance.

", + "CreateQueue": "

Creates a new queue for the specified Amazon Connect instance.

", "CreateQuickConnect": "

Creates a quick connect for the specified Amazon Connect instance.

", "CreateRoutingProfile": "

Creates a new routing profile.

", "CreateRule": "

Creates a rule for the specified Amazon Connect instance.

Use the Rules Function language to code conditions for the rule.

", @@ -60,6 +61,7 @@ "DeleteEmailAddress": "

Deletes email address from the specified Amazon Connect instance.

", "DeleteEvaluationForm": "

Deletes an evaluation form in the specified Amazon Connect instance.

", "DeleteHoursOfOperation": "

This API is in preview release for Amazon Connect and is subject to change.

Deletes an hours of operation.

", + "DeleteHoursOfOperationOverride": "

Deletes an hours of operation override in an Amazon Connect hours of operation resource.

", "DeleteInstance": "

This API is in preview release for Amazon Connect and is subject to change.

Deletes the Amazon Connect instance. For more information, see Delete your Amazon Connect instance in the Amazon Connect Administrator Guide.

Amazon Connect enforces a limit on the total number of instances that you can create or delete in 30 days. If you exceed this limit, you will get an error message indicating there has been an excessive number of attempts at creating or deleting instances. You must wait 30 days before you can restart creating and deleting instances in your account.

", "DeleteIntegrationAssociation": "

Deletes an Amazon Web Services resource association from an Amazon Connect instance. The association must not have any use cases associated with it.

", "DeletePredefinedAttribute": "

Deletes a predefined attribute from the specified Amazon Connect instance.

", @@ -87,6 +89,7 @@ "DescribeEmailAddress": "

Describe email address form the specified Amazon Connect instance.

", "DescribeEvaluationForm": "

Describes an evaluation form in the specified Amazon Connect instance. If the version property is not provided, the latest version of the evaluation form is described.

", "DescribeHoursOfOperation": "

This API is in preview release for Amazon Connect and is subject to change.

Describes the hours of operation.

", + "DescribeHoursOfOperationOverride": "

Describes the hours of operation override.

", "DescribeInstance": "

This API is in preview release for Amazon Connect and is subject to change.

Returns the current state of the specified instance identifier. It tracks the instance while it is being created and returns an error status, if applicable.

If an instance is not created successfully, the instance status reason field returns details relevant to the reason. The instance in a failed state is returned only for 24 hours after the CreateInstance API was invoked.

", "DescribeInstanceAttribute": "

This API is in preview release for Amazon Connect and is subject to change.

Describes the specified instance attribute.

", "DescribeInstanceStorageConfig": "

This API is in preview release for Amazon Connect and is subject to change.

Retrieves the current storage configurations for the specified resource type, association ID, and instance ID.

", @@ -122,6 +125,7 @@ "GetContactAttributes": "

Retrieves the contact attributes for the specified contact.

", "GetCurrentMetricData": "

Gets the real-time metric data from the specified Amazon Connect instance.

For a description of each metric, see Real-time Metrics Definitions in the Amazon Connect Administrator Guide.

", "GetCurrentUserData": "

Gets the real-time active user data from the specified Amazon Connect instance.

", + "GetEffectiveHoursOfOperations": "

Gets the hours of operations with the effective override applied.

", "GetFederationToken": "

Supports SAML sign-in for Amazon Connect. Retrieves a token for federation. The token is for the Amazon Connect user which corresponds to the IAM credentials that were used to invoke this action.

For more information about how SAML sign-in works in Amazon Connect, see Configure SAML with IAM for Amazon Connect in the Amazon Connect Administrator Guide.

This API doesn't support root users. If you try to invoke GetFederationToken with root credentials, an error message similar to the following one appears:

Provided identity: Principal: .... User: .... cannot be used for federation with Amazon Connect

", "GetFlowAssociation": "

Retrieves the flow associated for a given resource.

", "GetMetricData": "

Gets historical metric data from the specified Amazon Connect instance.

For a description of each historical metric, see Historical Metrics Definitions in the Amazon Connect Administrator Guide.

We recommend using the GetMetricDataV2 API. It provides more flexibility, features, and the ability to query longer time ranges than GetMetricData. Use it to retrieve historical agent and contact metrics for the last 3 months, at varying intervals. You can also use it to build custom dashboards to measure historical queue and agent performance. For example, you can track the number of incoming contacts for the last 7 days, with data split by day, to see how contact volume changed per day of the week.

", @@ -145,6 +149,7 @@ "ListEvaluationFormVersions": "

Lists versions of an evaluation form in the specified Amazon Connect instance.

", "ListEvaluationForms": "

Lists evaluation forms in the specified Amazon Connect instance.

", "ListFlowAssociations": "

List the flow association based on the filters.

", + "ListHoursOfOperationOverrides": "

Lists the hours of operation overrides.

", "ListHoursOfOperations": "

Provides information about the hours of operation for the specified Amazon Connect instance.

For more information about hours of operation, see Set the Hours of Operation for a Queue in the Amazon Connect Administrator Guide.

", "ListInstanceAttributes": "

This API is in preview release for Amazon Connect and is subject to change.

Returns a paginated list of all attribute types for the given instance.

", "ListInstanceStorageConfigs": "

This API is in preview release for Amazon Connect and is subject to change.

Returns a paginated list of storage configs for the identified instance and resource type.

", @@ -190,6 +195,7 @@ "SearchContactFlows": "

Searches the flows in an Amazon Connect instance, with optional filtering.

", "SearchContacts": "

Searches contacts in an Amazon Connect instance.

", "SearchEmailAddresses": "

Searches email address in an instance, with optional filtering.

", + "SearchHoursOfOperationOverrides": "

Searches the hours of operation overrides.

", "SearchHoursOfOperations": "

Searches the hours of operation in an Amazon Connect instance, with optional filtering.

", "SearchPredefinedAttributes": "

Searches predefined attributes that meet certain criteria. Predefined attributes are attributes in an Amazon Connect instance that can be used to route contacts to an agent or pools of agents within a queue. For more information, see Create predefined attributes for routing contacts to agents.

", "SearchPrompts": "

Searches prompts in an Amazon Connect instance, with optional filtering.

", @@ -240,6 +246,7 @@ "UpdateEmailAddressMetadata": "

Updates an email address metadata. For more information about email addresses, see Create email addresses in the Amazon Connect Administrator Guide.

", "UpdateEvaluationForm": "

Updates details about a specific evaluation form version in the specified Amazon Connect instance. Question and section identifiers cannot be duplicated within the same evaluation form.

This operation does not support partial updates. Instead it does a full update of evaluation form content.

", "UpdateHoursOfOperation": "

This API is in preview release for Amazon Connect and is subject to change.

Updates the hours of operation.

", + "UpdateHoursOfOperationOverride": "

Updates the hours of operation override.

", "UpdateInstanceAttribute": "

This API is in preview release for Amazon Connect and is subject to change.

Updates the value for the specified attribute type.

", "UpdateInstanceStorageConfig": "

This API is in preview release for Amazon Connect and is subject to change.

Updates an existing configuration for a resource type. This API is idempotent.

", "UpdateParticipantRoleConfig": "

Updates timeouts for when human chat participants are to be considered idle, and when agents are automatically disconnected from a chat due to idleness. You can set four timers:

For more information about how chat timeouts work, see Set up chat timeouts for human participants.

", @@ -365,6 +372,7 @@ "HierarchyGroupSummaryReference$Arn": "

The Amazon Resource Name (ARN) for the hierarchy group.

", "HierarchyLevel$Arn": "

The Amazon Resource Name (ARN) of the hierarchy level.

", "HoursOfOperation$HoursOfOperationArn": "

The Amazon Resource Name (ARN) for the hours of operation.

", + "HoursOfOperationOverride$HoursOfOperationArn": "

The Amazon Resource Name (ARN) for the hours of operation.

", "HoursOfOperationSummary$Arn": "

The Amazon Resource Name (ARN) of the hours of operation.

", "ImportPhoneNumberRequest$SourcePhoneNumberArn": "

The claimed phone number ARN being imported from the external service, such as Amazon Web Services End User Messaging. If it is from Amazon Web Services End User Messaging, it looks like the ARN of the phone number to import from Amazon Web Services End User Messaging.

", "ImportPhoneNumberResponse$PhoneNumberArn": "

The Amazon Resource Name (ARN) of the phone number.

", @@ -525,7 +533,7 @@ "AgentFirstName": { "base": null, "refs": { - "UserIdentityInfo$FirstName": "

The first name. This is required if you are using Amazon Connect or SAML for identity management.

", + "UserIdentityInfo$FirstName": "

The first name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) is not accepted.

", "UserIdentityInfoLite$FirstName": "

The user's first name.

" } }, @@ -566,7 +574,7 @@ "AgentLastName": { "base": null, "refs": { - "UserIdentityInfo$LastName": "

The last name. This is required if you are using Amazon Connect or SAML for identity management.

", + "UserIdentityInfo$LastName": "

The last name. This is required if you are using Amazon Connect or SAML for identity management. Inputs must be in Unicode Normalization Form C (NFC). Text containing characters in a non-NFC form (for example, decomposed characters or combining marks) is not accepted.

", "UserIdentityInfoLite$LastName": "

The user's last name.

" } }, @@ -811,6 +819,7 @@ "SearchContactFlowModulesResponse$ApproximateTotalCount": "

The total number of flows which matched your search query.

", "SearchContactFlowsResponse$ApproximateTotalCount": "

The total number of flows which matched your search query.

", "SearchEmailAddressesResponse$ApproximateTotalCount": "

The total number of email addresses which matched your search query.

", + "SearchHoursOfOperationOverridesResponse$ApproximateTotalCount": "

The total number of hours of operation overrides which matched your search query.

", "SearchHoursOfOperationsResponse$ApproximateTotalCount": "

The total number of hours of operations which matched your search query.

", "SearchPredefinedAttributesResponse$ApproximateTotalCount": "

The approximate number of predefined attributes which matched your search query.

", "SearchPromptsResponse$ApproximateTotalCount": "

The total number of quick connects which matched your search query.

", @@ -1465,6 +1474,22 @@ "ControlPlaneAttributeFilter$OrConditions": "

A list of conditions which would be applied together with an OR condition.

" } }, + "CommonHumanReadableDescription": { + "base": null, + "refs": { + "CreateHoursOfOperationOverrideRequest$Description": "

The description of the hours of operation override.

", + "HoursOfOperationOverride$Description": "

The description of the hours of operation override.

", + "UpdateHoursOfOperationOverrideRequest$Description": "

The description of the hours of operation override.

" + } + }, + "CommonHumanReadableName": { + "base": null, + "refs": { + "CreateHoursOfOperationOverrideRequest$Name": "

The name of the hours of operation override.

", + "HoursOfOperationOverride$Name": "

The name of the hours of operation override.

", + "UpdateHoursOfOperationOverrideRequest$Name": "

The name of the hours of operation override.

" + } + }, "CommonNameLength127": { "base": null, "refs": { @@ -1514,7 +1539,7 @@ } }, "ConditionalOperationFailedException": { - "base": "

A conditional check failed.

", + "base": "

Request processing failed because a dependent condition failed.

", "refs": { } }, @@ -1707,6 +1732,7 @@ "base": null, "refs": { "ContactFlowModule$State": "

The type of flow module.

", + "ContactFlowModuleSearchCriteria$StateCondition": "

The state of the flow module.

", "ContactFlowModuleSummary$State": "

The type of flow module.

", "ListContactFlowModulesRequest$ContactFlowModuleState": "

The state of the flow module.

", "UpdateContactFlowModuleMetadataRequest$State": "

The state of flow module.

" @@ -1715,7 +1741,8 @@ "ContactFlowModuleStatus": { "base": null, "refs": { - "ContactFlowModule$Status": "

The status of the flow module.

" + "ContactFlowModule$Status": "

The status of the flow module.

", + "ContactFlowModuleSearchCriteria$StatusCondition": "

The status of the flow module.

" } }, "ContactFlowModuleSummary": { @@ -2135,6 +2162,16 @@ "refs": { } }, + "CreateHoursOfOperationOverrideRequest": { + "base": null, + "refs": { + } + }, + "CreateHoursOfOperationOverrideResponse": { + "base": null, + "refs": { + } + }, "CreateHoursOfOperationRequest": { "base": null, "refs": { @@ -2466,12 +2503,30 @@ "BatchDisassociateAnalyticsDataSetResponse$Deleted": "

An array of successfully disassociated dataset identifiers.

" } }, + "DateComparisonType": { + "base": null, + "refs": { + "DateCondition$ComparisonType": "

The comparison type to use for the hours of operation override date condition.

" + } + }, + "DateCondition": { + "base": "

An object to specify the hours of operation override date condition.

", + "refs": { + "HoursOfOperationOverrideSearchCriteria$DateCondition": "

A leaf node condition which can be used to specify a date condition.

" + } + }, "DateReference": { "base": "

Information about a reference when the referenceType is DATE. Otherwise, null.

", "refs": { "ReferenceSummary$Date": "

Information about a reference when the referenceType is DATE. Otherwise, null.

" } }, + "DateYearMonthDayFormat": { + "base": null, + "refs": { + "DateCondition$Value": "

The date value for the hours of operation override date condition, in YYYY-MM-DD format.

" + } + }, "DeactivateEvaluationFormRequest": { "base": null, "refs": { @@ -2551,6 +2606,11 @@ "refs": { } }, + "DeleteHoursOfOperationOverrideRequest": { + "base": null, + "refs": { + } + }, "DeleteHoursOfOperationRequest": { "base": null, "refs": { @@ -2756,6 +2816,16 @@ "refs": { } }, + "DescribeHoursOfOperationOverrideRequest": { + "base": null, + "refs": { + } + }, + "DescribeHoursOfOperationOverrideResponse": { + "base": null, + "refs": { + } + }, "DescribeHoursOfOperationRequest": { "base": null, "refs": { @@ -3195,6 +3265,18 @@ "RoutingCriteriaInputStepExpiry$DurationInSeconds": "

The number of seconds that the contact will be routed only to agents matching this routing
 step, if expiry was configured for this routing step.

" } }, + "EffectiveHoursOfOperationList": { + "base": null, + "refs": { + "GetEffectiveHoursOfOperationsResponse$EffectiveHoursOfOperationList": "

Information about the effective hours of operations.

" + } + }, + "EffectiveHoursOfOperations": { + "base": "

Information about the hours of operations with the effective override applied.

", + "refs": { + "EffectiveHoursOfOperationList$member": null + } + }, "Email": { "base": null, "refs": { @@ -4062,6 +4144,16 @@ "refs": { } }, + "GetEffectiveHoursOfOperationsRequest": { + "base": null, + "refs": { + } + }, + "GetEffectiveHoursOfOperationsResponse": { + "base": null, + "refs": { + } + }, "GetFederationTokenRequest": { "base": null, "refs": { @@ -4382,7 +4474,8 @@ "Hours24Format": { "base": null, "refs": { - "HoursOfOperationTimeSlice$Hours": "

The hours.

" + "HoursOfOperationTimeSlice$Hours": "

The hours.

", + "OverrideTimeSlice$Hours": "

The hours.

" } }, "HoursOfOperation": { @@ -4422,13 +4515,20 @@ "HoursOfOperationId": { "base": null, "refs": { + "CreateHoursOfOperationOverrideRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", "CreateHoursOfOperationResponse$HoursOfOperationId": "

The identifier for the hours of operation.

", "CreateQueueRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", + "DeleteHoursOfOperationOverrideRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", "DeleteHoursOfOperationRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", + "DescribeHoursOfOperationOverrideRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", "DescribeHoursOfOperationRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", + "GetEffectiveHoursOfOperationsRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", "HoursOfOperation$HoursOfOperationId": "

The identifier for the hours of operation.

", + "HoursOfOperationOverride$HoursOfOperationId": "

The identifier for the hours of operation.

", "HoursOfOperationSummary$Id": "

The identifier of the hours of operation.

", + "ListHoursOfOperationOverridesRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", "Queue$HoursOfOperationId": "

The identifier for the hours of operation.

", + "UpdateHoursOfOperationOverrideRequest$HoursOfOperationId": "

The identifier for the hours of operation.

", "UpdateHoursOfOperationRequest$HoursOfOperationId": "

The identifier of the hours of operation.

", "UpdateQueueHoursOfOperationRequest$HoursOfOperationId": "

The identifier for the hours of operation.

" } @@ -4445,6 +4545,72 @@ "HoursOfOperationSummary$Name": "

The name of the hours of operation.

" } }, + "HoursOfOperationOverride": { + "base": "

Information about the hours of operations override.

", + "refs": { + "DescribeHoursOfOperationOverrideResponse$HoursOfOperationOverride": "

Information about the hours of operations override.

", + "HoursOfOperationOverrideList$member": null + } + }, + "HoursOfOperationOverrideConfig": { + "base": "

Information about the hours of operation override config: day, start time, and end time.

", + "refs": { + "HoursOfOperationOverrideConfigList$member": null + } + }, + "HoursOfOperationOverrideConfigList": { + "base": null, + "refs": { + "CreateHoursOfOperationOverrideRequest$Config": "

Configuration information for the hours of operation override: day, start time, and end time.

", + "HoursOfOperationOverride$Config": "

Configuration information for the hours of operation override: day, start time, and end time.

", + "UpdateHoursOfOperationOverrideRequest$Config": "

Configuration information for the hours of operation override: day, start time, and end time.

" + } + }, + "HoursOfOperationOverrideId": { + "base": null, + "refs": { + "CreateHoursOfOperationOverrideResponse$HoursOfOperationOverrideId": "

The identifier for the hours of operation override.

", + "DeleteHoursOfOperationOverrideRequest$HoursOfOperationOverrideId": "

The identifier for the hours of operation override.

", + "DescribeHoursOfOperationOverrideRequest$HoursOfOperationOverrideId": "

The identifier for the hours of operation override.

", + "HoursOfOperationOverride$HoursOfOperationOverrideId": "

The identifier for the hours of operation override.

", + "UpdateHoursOfOperationOverrideRequest$HoursOfOperationOverrideId": "

The identifier for the hours of operation override.

" + } + }, + "HoursOfOperationOverrideList": { + "base": null, + "refs": { + "ListHoursOfOperationOverridesResponse$HoursOfOperationOverrideList": "

Information about the hours of operation override.

", + "SearchHoursOfOperationOverridesResponse$HoursOfOperationOverrides": "

Information about the hours of operations overrides.

" + } + }, + "HoursOfOperationOverrideSearchConditionList": { + "base": null, + "refs": { + "HoursOfOperationOverrideSearchCriteria$OrConditions": "

A list of conditions which would be applied together with an OR condition.

", + "HoursOfOperationOverrideSearchCriteria$AndConditions": "

A list of conditions which would be applied together with an AND condition.

" + } + }, + "HoursOfOperationOverrideSearchCriteria": { + "base": "

The search criteria to be used to return hours of operations overrides.

", + "refs": { + "HoursOfOperationOverrideSearchConditionList$member": null, + "SearchHoursOfOperationOverridesRequest$SearchCriteria": "

The search criteria to be used to return hours of operations overrides.

" + } + }, + "HoursOfOperationOverrideYearMonthDayDateFormat": { + "base": null, + "refs": { + "CreateHoursOfOperationOverrideRequest$EffectiveFrom": "

The date from when the hours of operation override would be effective.

", + "CreateHoursOfOperationOverrideRequest$EffectiveTill": "

The date until when the hours of operation override would be effective.

", + "EffectiveHoursOfOperations$Date": "

The date that the hours of operation or overrides apply to.

", + "GetEffectiveHoursOfOperationsRequest$FromDate": "

The date from which the hours of operation are listed.

", + "GetEffectiveHoursOfOperationsRequest$ToDate": "

The date until which the hours of operation are listed.

", + "HoursOfOperationOverride$EffectiveFrom": "

The date from which the hours of operation override would be effective.

", + "HoursOfOperationOverride$EffectiveTill": "

The date until which the hours of operation override would be effective.

", + "UpdateHoursOfOperationOverrideRequest$EffectiveFrom": "

The date from when the hours of operation override would be effective.

", + "UpdateHoursOfOperationOverrideRequest$EffectiveTill": "

The date until when the hours of operation override would be effective.

" + } + }, "HoursOfOperationSearchConditionList": { "base": null, "refs": { @@ -4462,6 +4628,7 @@ "HoursOfOperationSearchFilter": { "base": "

Filters to be applied to search results.

", "refs": { + "SearchHoursOfOperationOverridesRequest$SearchFilter": null, "SearchHoursOfOperationsRequest$SearchFilter": "

Filters to be applied to search results.

" } }, @@ -4635,6 +4802,7 @@ "CreateContactRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "CreateEmailAddressRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "CreateEvaluationFormRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "CreateHoursOfOperationOverrideRequest$InstanceId": "

The identifier of the Amazon Connect instance.

", "CreateHoursOfOperationRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "CreateInstanceResponse$Id": "

The identifier for the instance.

", "CreateIntegrationAssociationRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", @@ -4661,6 +4829,7 @@ "DeleteContactFlowRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "DeleteEmailAddressRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "DeleteEvaluationFormRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "DeleteHoursOfOperationOverrideRequest$InstanceId": "

The identifier of the Amazon Connect instance.

", "DeleteHoursOfOperationRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "DeleteInstanceRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "DeleteIntegrationAssociationRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", @@ -4685,6 +4854,7 @@ "DescribeContactRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "DescribeEmailAddressRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "DescribeEvaluationFormRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "DescribeHoursOfOperationOverrideRequest$InstanceId": "

The identifier of the Amazon Connect instance.

", "DescribeHoursOfOperationRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "DescribeInstanceAttributeRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "DescribeInstanceRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", @@ -4718,6 +4888,7 @@ "GetContactAttributesRequest$InstanceId": "

The identifier of the Amazon Connect instance.

", "GetCurrentMetricDataRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "GetCurrentUserDataRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "GetEffectiveHoursOfOperationsRequest$InstanceId": "

The identifier of the Amazon Connect instance.

", "GetFederationTokenRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "GetFlowAssociationRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "GetMetricDataRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", @@ -4743,6 +4914,7 @@ "ListEvaluationFormVersionsRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "ListEvaluationFormsRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "ListFlowAssociationsRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "ListHoursOfOperationOverridesRequest$InstanceId": "

The identifier of the Amazon Connect instance.

", "ListHoursOfOperationsRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "ListInstanceAttributesRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "ListInstanceStorageConfigsRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", @@ -4783,6 +4955,7 @@ "SearchContactFlowsRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "SearchContactsRequest$InstanceId": "

The identifier of Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "SearchEmailAddressesRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "SearchHoursOfOperationOverridesRequest$InstanceId": "

The identifier of the Amazon Connect instance.

", "SearchHoursOfOperationsRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "SearchPredefinedAttributesRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "SearchPromptsRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", @@ -4830,6 +5003,7 @@ "UpdateContactScheduleRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "UpdateEmailAddressMetadataRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "UpdateEvaluationFormRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", + "UpdateHoursOfOperationOverrideRequest$InstanceId": "

The identifier of the Amazon Connect instance.

", "UpdateHoursOfOperationRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "UpdateInstanceAttributeRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", "UpdateInstanceStorageConfigRequest$InstanceId": "

The identifier of the Amazon Connect instance. You can find the instance ID in the Amazon Resource Name (ARN) of the instance.

", @@ -5320,6 +5494,16 @@ "refs": { } }, + "ListHoursOfOperationOverridesRequest": { + "base": null, + "refs": { + } + }, + "ListHoursOfOperationOverridesResponse": { + "base": null, + "refs": { + } + }, "ListHoursOfOperationsRequest": { "base": null, "refs": { @@ -5685,6 +5869,7 @@ "ListDefaultVocabulariesRequest$MaxResults": "

The maximum number of results to return per page.

", "ListEvaluationFormVersionsRequest$MaxResults": "

The maximum number of results to return per page.

", "ListEvaluationFormsRequest$MaxResults": "

The maximum number of results to return per page.

", + "ListHoursOfOperationOverridesRequest$MaxResults": "

The maximum number of results to return per page. The default MaxResult size is 100. Valid Range: Minimum value of 1. Maximum value of 1000.

", "ListIntegrationAssociationsRequest$MaxResults": "

The maximum number of results to return per page.

", "ListPredefinedAttributesRequest$MaxResults": "

The maximum number of results to return per page.

", "ListQueueQuickConnectsRequest$MaxResults": "

The maximum number of results to return per page. The default MaxResult size is 100.

", @@ -5698,6 +5883,7 @@ "SearchContactFlowsRequest$MaxResults": "

The maximum number of results to return per page.

", "SearchContactsRequest$MaxResults": "

The maximum number of results to return per page.

", "SearchEmailAddressesRequest$MaxResults": "

The maximum number of results to return per page.

", + "SearchHoursOfOperationOverridesRequest$MaxResults": "

The maximum number of results to return per page. Valid Range: Minimum value of 1. Maximum value of 100.

", "SearchHoursOfOperationsRequest$MaxResults": "

The maximum number of results to return per page.

", "SearchPredefinedAttributesRequest$MaxResults": "

The maximum number of results to return per page.

", "SearchPromptsRequest$MaxResults": "

The maximum number of results to return per page.

", @@ -5948,7 +6134,8 @@ "MinutesLimit60": { "base": null, "refs": { - "HoursOfOperationTimeSlice$Minutes": "

The minutes.

" + "HoursOfOperationTimeSlice$Minutes": "

The minutes.

", + "OverrideTimeSlice$Minutes": "

The minutes.

" } }, "MonitorCapability": { @@ -6041,6 +6228,8 @@ "ListEvaluationFormsResponse$NextToken": "

If there are additional results, this is the token for the next set of results.

", "ListFlowAssociationsRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", "ListFlowAssociationsResponse$NextToken": "

If there are additional results, this is the token for the next set of results.

", + "ListHoursOfOperationOverridesRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "ListHoursOfOperationOverridesResponse$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", "ListHoursOfOperationsRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", "ListHoursOfOperationsResponse$NextToken": "

If there are additional results, this is the token for the next set of results.

", "ListInstanceAttributesRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", @@ -6110,6 +6299,8 @@ "SearchContactFlowsRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", "SearchContactFlowsResponse$NextToken": "

If there are additional results, this is the token for the next set of results.

", "SearchEmailAddressesRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "SearchHoursOfOperationOverridesRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. Length Constraints: Minimum length of 1. Maximum length of 2500.

", + "SearchHoursOfOperationOverridesResponse$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results. Length Constraints: Minimum length of 1. Maximum length of 2500.

", "SearchHoursOfOperationsRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", "SearchHoursOfOperationsResponse$NextToken": "

If there are additional results, this is the token for the next set of results.

", "SearchPredefinedAttributesRequest$NextToken": "

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", @@ -6199,6 +6390,18 @@ "DeviceInfo$OperatingSystem": "

Operating system that the participant used for the call.

" } }, + "OperationalHour": { + "base": "

Information about the hours of operations with the effective override applied.

", + "refs": { + "OperationalHours$member": null + } + }, + "OperationalHours": { + "base": null, + "refs": { + "EffectiveHoursOfOperations$OperationalHours": "

Information about the hours of operations with the effective override applied.

" + } + }, "Origin": { "base": null, "refs": { @@ -6291,6 +6494,21 @@ "refs": { } }, + "OverrideDays": { + "base": null, + "refs": { + "HoursOfOperationOverrideConfig$Day": "

The day that the hours of operation override applies to.

" + } + }, + "OverrideTimeSlice": { + "base": "

The start time or end time for an hours of operation override.

", + "refs": { + "HoursOfOperationOverrideConfig$StartTime": "

The start time when your contact center opens if overrides are applied.

", + "HoursOfOperationOverrideConfig$EndTime": "

The end time that your contact center closes if overrides are applied.

", + "OperationalHour$Start": "

The start time that your contact center opens.

", + "OperationalHour$End": "

The end time that your contact center closes.

" + } + }, "PEM": { "base": null, "refs": { @@ -6336,7 +6554,7 @@ "ParticipantRole": { "base": null, "refs": { - "ContactConfiguration$ParticipantRole": "

The role of the participant in the chat conversation.

", + "ContactConfiguration$ParticipantRole": "

The role of the participant in the chat conversation.

Only CUSTOMER is currently supported. Any value other than CUSTOMER will result in an exception (4xx error).

", "ParticipantDetailsToAdd$ParticipantRole": "

The role of the participant being added.

", "RealTimeContactAnalysisSegmentAttachments$ParticipantRole": "

The role of the participant. For example, is it a customer, agent, or system.

", "RealTimeContactAnalysisSegmentEvent$ParticipantRole": "

The role of the participant. For example, is it a customer, agent, or system.

", @@ -7488,6 +7706,7 @@ "HierarchyLevel$LastModifiedRegion": "

The Amazon Web Services Region where this resource was last modified.

", "HoursOfOperation$LastModifiedRegion": "

The Amazon Web Services Region where this resource was last modified.

", "HoursOfOperationSummary$LastModifiedRegion": "

The Amazon Web Services Region where this resource was last modified.

", + "ListHoursOfOperationOverridesResponse$LastModifiedRegion": "

The Amazon Web Services Region where this resource was last modified.

", "ListQueueQuickConnectsResponse$LastModifiedRegion": "

The Amazon Web Services Region where this resource was last modified.

", "ListRoutingProfileQueuesResponse$LastModifiedRegion": "

The Amazon Web Services Region where this resource was last modified.

", "ListSecurityProfileApplicationsResponse$LastModifiedRegion": "

The Amazon Web Services Region where this resource was last modified.

", @@ -8062,6 +8281,16 @@ "refs": { } }, + "SearchHoursOfOperationOverridesRequest": { + "base": null, + "refs": { + } + }, + "SearchHoursOfOperationOverridesResponse": { + "base": null, + "refs": { + } + }, "SearchHoursOfOperationsRequest": { "base": null, "refs": { @@ -8753,6 +8982,7 @@ "refs": { "AnalyticsDataAssociationResult$ResourceShareId": "

The Resource Access Manager share ID.

", "AssociateAnalyticsDataSetResponse$ResourceShareId": "

The Resource Access Manager share ID that is generated.

", + "DateCondition$FieldName": "

An object to specify the hours of operation override date field.

", "ErrorResult$ErrorCode": "

The error code.

", "ErrorResult$ErrorMessage": "

The corresponding error message for the error code.

", "FailedRequest$FailureReasonMessage": "

Why the request to create a contact failed.

", @@ -8788,6 +9018,7 @@ "ContactFlowModuleSearchCriteria$StringCondition": null, "ContactFlowSearchCriteria$StringCondition": null, "EmailAddressSearchCriteria$StringCondition": null, + "HoursOfOperationOverrideSearchCriteria$StringCondition": null, "HoursOfOperationSearchCriteria$StringCondition": "

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name, description, timezone, and resourceID.

", "PredefinedAttributeSearchCriteria$StringCondition": null, "PromptSearchCriteria$StringCondition": "

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name, description, and resourceID.

", @@ -9260,6 +9491,7 @@ "base": null, "refs": { "CreateHoursOfOperationRequest$TimeZone": "

The time zone of the hours of operation.

", + "GetEffectiveHoursOfOperationsResponse$TimeZone": "

The time zone for the hours of operation.

", "HoursOfOperation$TimeZone": "

The time zone for the hours of operation.

", "UpdateHoursOfOperationRequest$TimeZone": "

The time zone of the hours of operation.

" } @@ -9306,6 +9538,7 @@ "HoursOfOperationSummary$LastModifiedTime": "

The timestamp when this resource was last modified.

", "Instance$CreatedTime": "

When the instance was created.

", "InstanceSummary$CreatedTime": "

When the instance was created.

", + "ListHoursOfOperationOverridesResponse$LastModifiedTime": "

The timestamp when this resource was last modified.

", "ListQueueQuickConnectsResponse$LastModifiedTime": "

The timestamp when this resource was last modified.

", "ListRoutingProfileQueuesResponse$LastModifiedTime": "

The timestamp when this resource was last modified.

", "ListSecurityProfileApplicationsResponse$LastModifiedTime": "

The timestamp when this resource was last modified.

", @@ -9653,6 +9886,11 @@ "UpdateHoursOfOperationRequest$Description": "

The description of the hours of operation.

" } }, + "UpdateHoursOfOperationOverrideRequest": { + "base": null, + "refs": { + } + }, "UpdateHoursOfOperationRequest": { "base": null, "refs": { diff --git a/apis/connect/2017-08-08/paginators-1.json b/apis/connect/2017-08-08/paginators-1.json index a7fdd3329e2..0015869326c 100644 --- a/apis/connect/2017-08-08/paginators-1.json +++ b/apis/connect/2017-08-08/paginators-1.json @@ -96,6 +96,16 @@ "output_token": "NextToken", "result_key": "FlowAssociationSummaryList" }, + "ListHoursOfOperationOverrides": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "LastModifiedRegion", + "LastModifiedTime" + ], + "output_token": "NextToken", + "result_key": "HoursOfOperationOverrideList" + }, "ListHoursOfOperations": { "input_token": "NextToken", "limit_key": "MaxResults", @@ -343,6 +353,15 @@ "output_token": "NextToken", "result_key": "Contacts" }, + "SearchHoursOfOperationOverrides": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "ApproximateTotalCount" + ], + "output_token": "NextToken", + "result_key": "HoursOfOperationOverrides" + }, "SearchHoursOfOperations": { "input_token": "NextToken", "limit_key": "MaxResults", diff --git a/apis/dms/2016-01-01/api-2.json b/apis/dms/2016-01-01/api-2.json index d012f3d4442..8669c433383 100644 --- a/apis/dms/2016-01-01/api-2.json +++ b/apis/dms/2016-01-01/api-2.json @@ -344,7 +344,8 @@ "output":{"shape":"DeleteEventSubscriptionResponse"}, "errors":[ {"shape":"ResourceNotFoundFault"}, - {"shape":"InvalidResourceStateFault"} + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"} ] }, "DeleteFleetAdvisorCollector":{ @@ -442,7 +443,8 @@ "output":{"shape":"DeleteReplicationSubnetGroupResponse"}, "errors":[ {"shape":"InvalidResourceStateFault"}, - {"shape":"ResourceNotFoundFault"} + {"shape":"ResourceNotFoundFault"}, + {"shape":"AccessDeniedFault"} ] }, "DeleteReplicationTask":{ @@ -990,7 +992,8 @@ "output":{"shape":"DescribeTableStatisticsResponse"}, "errors":[ {"shape":"ResourceNotFoundFault"}, - {"shape":"InvalidResourceStateFault"} + {"shape":"InvalidResourceStateFault"}, + {"shape":"AccessDeniedFault"} ] }, "ExportMetadataModelAssessment":{ @@ -1107,7 +1110,8 @@ {"shape":"KMSDisabledFault"}, {"shape":"KMSInvalidStateFault"}, {"shape":"KMSNotFoundFault"}, - {"shape":"KMSThrottlingFault"} + {"shape":"KMSThrottlingFault"}, + {"shape":"AccessDeniedFault"} ] }, "ModifyInstanceProfile":{ @@ -2081,7 +2085,7 @@ "members":{ "ReplicationInstanceIdentifier":{"shape":"String"}, "AllocatedStorage":{"shape":"IntegerOptional"}, - "ReplicationInstanceClass":{"shape":"String"}, + "ReplicationInstanceClass":{"shape":"ReplicationInstanceClass"}, "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, "AvailabilityZone":{"shape":"String"}, "ReplicationSubnetGroupIdentifier":{"shape":"String"}, @@ -2094,7 +2098,8 @@ "PubliclyAccessible":{"shape":"BooleanOptional"}, "DnsNameServers":{"shape":"String"}, "ResourceIdentifier":{"shape":"String"}, - "NetworkType":{"shape":"String"} + "NetworkType":{"shape":"String"}, + "KerberosAuthenticationSettings":{"shape":"KerberosAuthenticationSettings"} } }, "CreateReplicationInstanceResponse":{ @@ -3758,7 +3763,8 @@ "SaslPassword":{"shape":"SecretString"}, "NoHexPrefix":{"shape":"BooleanOptional"}, "SaslMechanism":{"shape":"KafkaSaslMechanism"}, - "SslEndpointIdentificationAlgorithm":{"shape":"KafkaSslEndpointIdentificationAlgorithm"} + "SslEndpointIdentificationAlgorithm":{"shape":"KafkaSslEndpointIdentificationAlgorithm"}, + 
"UseLargeIntegerValue":{"shape":"BooleanOptional"} } }, "KafkaSslEndpointIdentificationAlgorithm":{ @@ -3768,6 +3774,14 @@ "https" ] }, + "KerberosAuthenticationSettings":{ + "type":"structure", + "members":{ + "KeyCacheSecretId":{"shape":"String"}, + "KeyCacheSecretIamArn":{"shape":"String"}, + "Krb5FileContents":{"shape":"String"} + } + }, "KeyList":{ "type":"list", "member":{"shape":"String"} @@ -3784,7 +3798,8 @@ "IncludeTableAlterOperations":{"shape":"BooleanOptional"}, "IncludeControlDetails":{"shape":"BooleanOptional"}, "IncludeNullAndEmpty":{"shape":"BooleanOptional"}, - "NoHexPrefix":{"shape":"BooleanOptional"} + "NoHexPrefix":{"shape":"BooleanOptional"}, + "UseLargeIntegerValue":{"shape":"BooleanOptional"} } }, "Limitation":{ @@ -3864,7 +3879,8 @@ "SecretsManagerSecretId":{"shape":"String"}, "TrimSpaceInChar":{"shape":"BooleanOptional"}, "TlogAccessMode":{"shape":"TlogAccessMode"}, - "ForceLobLookup":{"shape":"BooleanOptional"} + "ForceLobLookup":{"shape":"BooleanOptional"}, + "AuthenticationMethod":{"shape":"SqlServerAuthenticationMethod"} } }, "MicrosoftSqlServerDataProviderSettings":{ @@ -4091,7 +4107,7 @@ "ReplicationInstanceArn":{"shape":"String"}, "AllocatedStorage":{"shape":"IntegerOptional"}, "ApplyImmediately":{"shape":"Boolean"}, - "ReplicationInstanceClass":{"shape":"String"}, + "ReplicationInstanceClass":{"shape":"ReplicationInstanceClass"}, "VpcSecurityGroupIds":{"shape":"VpcSecurityGroupIdList"}, "PreferredMaintenanceWindow":{"shape":"String"}, "MultiAZ":{"shape":"BooleanOptional"}, @@ -4099,7 +4115,8 @@ "AllowMajorVersionUpgrade":{"shape":"Boolean"}, "AutoMinorVersionUpgrade":{"shape":"BooleanOptional"}, "ReplicationInstanceIdentifier":{"shape":"String"}, - "NetworkType":{"shape":"String"} + "NetworkType":{"shape":"String"}, + "KerberosAuthenticationSettings":{"shape":"KerberosAuthenticationSettings"} } }, "ModifyReplicationInstanceResponse":{ @@ -4250,6 +4267,13 @@ "one" ] }, + "OracleAuthenticationMethod":{ + "type":"string", + "enum":[ + "password", + "kerberos" + ] + }, "OracleDataProviderSettings":{ "type":"structure", "members":{ @@ -4310,14 +4334,15 @@ "SecretsManagerOracleAsmSecretId":{"shape":"String"}, "TrimSpaceInChar":{"shape":"BooleanOptional"}, "ConvertTimestampWithZoneToUTC":{"shape":"BooleanOptional"}, - "OpenTransactionWindow":{"shape":"IntegerOptional"} + "OpenTransactionWindow":{"shape":"IntegerOptional"}, + "AuthenticationMethod":{"shape":"OracleAuthenticationMethod"} } }, "OrderableReplicationInstance":{ "type":"structure", "members":{ "EngineVersion":{"shape":"String"}, - "ReplicationInstanceClass":{"shape":"String"}, + "ReplicationInstanceClass":{"shape":"ReplicationInstanceClass"}, "StorageType":{"shape":"String"}, "MinAllocatedStorage":{"shape":"Integer"}, "MaxAllocatedStorage":{"shape":"Integer"}, @@ -4398,7 +4423,8 @@ "MapJsonbAsClob":{"shape":"BooleanOptional"}, "MapLongVarcharAs":{"shape":"LongVarcharMappingType"}, "DatabaseMode":{"shape":"DatabaseMode"}, - "BabelfishDatabaseName":{"shape":"String"} + "BabelfishDatabaseName":{"shape":"String"}, + "DisableUnicodeSourceFilter":{"shape":"BooleanOptional"} } }, "PostgreSqlDataProviderSettings":{ @@ -4732,7 +4758,7 @@ "type":"structure", "members":{ "ReplicationInstanceIdentifier":{"shape":"String"}, - "ReplicationInstanceClass":{"shape":"String"}, + "ReplicationInstanceClass":{"shape":"ReplicationInstanceClass"}, "ReplicationInstanceStatus":{"shape":"String"}, "AllocatedStorage":{"shape":"Integer"}, "InstanceCreateTime":{"shape":"TStamp"}, @@ -4761,9 +4787,14 @@ 
"SecondaryAvailabilityZone":{"shape":"String"}, "FreeUntil":{"shape":"TStamp"}, "DnsNameServers":{"shape":"String"}, - "NetworkType":{"shape":"String"} + "NetworkType":{"shape":"String"}, + "KerberosAuthenticationSettings":{"shape":"KerberosAuthenticationSettings"} } }, + "ReplicationInstanceClass":{ + "type":"string", + "max":30 + }, "ReplicationInstanceIpv6AddressList":{ "type":"list", "member":{"shape":"String"} @@ -4799,7 +4830,7 @@ "ReplicationPendingModifiedValues":{ "type":"structure", "members":{ - "ReplicationInstanceClass":{"shape":"String"}, + "ReplicationInstanceClass":{"shape":"ReplicationInstanceClass"}, "AllocatedStorage":{"shape":"IntegerOptional"}, "MultiAZ":{"shape":"BooleanOptional"}, "EngineVersion":{"shape":"String"}, @@ -4881,7 +4912,7 @@ "AssessmentStatus":{"shape":"String"}, "AssessmentResultsFile":{"shape":"String"}, "AssessmentResults":{"shape":"String"}, - "S3ObjectUrl":{"shape":"String"} + "S3ObjectUrl":{"shape":"SecretString"} } }, "ReplicationTaskAssessmentResultList":{ @@ -5163,6 +5194,13 @@ "type":"string", "enum":["replication-instance"] }, + "SqlServerAuthenticationMethod":{ + "type":"string", + "enum":[ + "password", + "kerberos" + ] + }, "SslSecurityProtocolValue":{ "type":"string", "enum":[ diff --git a/apis/dms/2016-01-01/docs-2.json b/apis/dms/2016-01-01/docs-2.json index 2c57e184188..b26f5422928 100644 --- a/apis/dms/2016-01-01/docs-2.json +++ b/apis/dms/2016-01-01/docs-2.json @@ -273,6 +273,7 @@ "KafkaSettings$IncludeControlDetails": "

Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. The default is false.

", "KafkaSettings$IncludeNullAndEmpty": "

Include NULL and empty columns for records migrated to the endpoint. The default is false.

", "KafkaSettings$NoHexPrefix": "

Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the NoHexPrefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

", + "KafkaSettings$UseLargeIntegerValue": "

Specifies whether to use large integer values with Kafka.
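A hedged sketch: modify_endpoint and its KafkaSettings parameter already exist in boto3, but UseLargeIntegerValue is the member added in this model update, so the call assumes an SDK built from it; the endpoint ARN is a placeholder.

```python
import boto3

dms = boto3.client("dms")

# Enable large integer handling on a Kafka target endpoint
# (UseLargeIntegerValue is the setting added in this model update).
dms.modify_endpoint(
    EndpointArn="arn:aws:dms:us-east-1:111122223333:endpoint:KAFKAEXAMPLE",  # placeholder
    KafkaSettings={
        "UseLargeIntegerValue": True,
    },
)
```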

", "KinesisSettings$IncludeTransactionDetails": "

Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). The default is false.

", "KinesisSettings$IncludePartitionValue": "

Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. The default is false.

", "KinesisSettings$PartitionIncludeSchemaTable": "

Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kinesis shards. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same shard, which causes throttling. The default is false.

", @@ -280,6 +281,7 @@ "KinesisSettings$IncludeControlDetails": "

Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. The default is false.

", "KinesisSettings$IncludeNullAndEmpty": "

Include NULL and empty columns for records migrated to the endpoint. The default is false.

", "KinesisSettings$NoHexPrefix": "

Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to an Amazon Kinesis target. Use the NoHexPrefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.

", + "KinesisSettings$UseLargeIntegerValue": "

Specifies whether to use large integer values with Kinesis.

", "MicrosoftSQLServerSettings$QuerySingleAlwaysOnNode": "

Cleans and recreates table metadata information on the replication instance when a mismatch occurs. An example is a situation where running an alter DDL statement on a table might result in different information about the table cached in the replication instance.

", "MicrosoftSQLServerSettings$ReadBackupOnly": "

When this attribute is set to Y, DMS only reads changes from transaction log backups and doesn't read from the active transaction log file during ongoing replication. Setting this parameter to Y enables you to control active transaction log file growth during full load and ongoing replication tasks. However, it can add some source latency to ongoing replication.

", "MicrosoftSQLServerSettings$UseBcpFullLoad": "

Use this attribute to transfer data for full-load operations using BCP. When the target table contains an identity column that does not exist in the source table, you must disable the use BCP for loading table option.

", @@ -304,21 +306,22 @@ "OracleSettings$ReplacePathPrefix": "

Set this attribute to true in order to use the Binary Reader to capture change data for an Amazon RDS for Oracle as the source. This setting tells DMS instance to replace the default Oracle root with the specified usePathPrefix setting to access the redo logs.

", "OracleSettings$EnableHomogenousTablespace": "

Set this attribute to enable homogenous tablespace replication and create existing tables or indexes under the same tablespace on the target.

", "OracleSettings$DirectPathNoLog": "

When set to true, this attribute helps to increase the commit rate on the Oracle target database by writing directly to tables and not writing a trail to database logs.

", - "OracleSettings$ArchivedLogsOnly": "

When this field is set to Y, DMS only accesses the archived redo logs. If the archived redo logs are stored on Automatic Storage Management (ASM) only, the DMS user account needs to be granted ASM privileges.

", + "OracleSettings$ArchivedLogsOnly": "

When this field is set to True, DMS only accesses the archived redo logs. If the archived redo logs are stored on Automatic Storage Management (ASM) only, the DMS user account needs to be granted ASM privileges.

", "OracleSettings$DirectPathParallelLoad": "

When set to true, this attribute specifies a parallel load when useDirectPathFullLoad is set to Y. This attribute also only applies when you use the DMS parallel load feature. Note that the target table cannot have any constraints or indexes.

", "OracleSettings$FailTasksOnLobTruncation": "

When set to true, this attribute causes a task to fail if the actual size of an LOB column is greater than the specified LobMaxSize.

If a task is set to limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.

", "OracleSettings$ReadTableSpaceName": "

When set to true, this attribute supports tablespace replication.

", - "OracleSettings$UseBFile": "

Set this attribute to Y to capture change data using the Binary Reader utility. Set UseLogminerReader to N to set this attribute to Y. To use Binary Reader with Amazon RDS for Oracle as the source, you set additional attributes. For more information about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC.

", - "OracleSettings$UseDirectPathFullLoad": "

Set this attribute to Y to have DMS use a direct path full load. Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). By using this OCI protocol, you can bulk-load Oracle target tables during a full load.

", - "OracleSettings$UseLogminerReader": "

Set this attribute to Y to capture change data using the Oracle LogMiner utility (the default). Set this attribute to N if you want to access the redo logs as a binary file. When you set UseLogminerReader to N, also set UseBfile to Y. For more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in the DMS User Guide.

", + "OracleSettings$UseBFile": "

Set this attribute to True to capture change data using the Binary Reader utility. Set UseLogminerReader to False to set this attribute to True. To use Binary Reader with Amazon RDS for Oracle as the source, you set additional attributes. For more information about using this setting with Oracle Automatic Storage Management (ASM), see Using Oracle LogMiner or DMS Binary Reader for CDC.

", + "OracleSettings$UseDirectPathFullLoad": "

Set this attribute to True to have DMS use a direct path full load. Specify this value to use the direct path protocol in the Oracle Call Interface (OCI). By using this OCI protocol, you can bulk-load Oracle target tables during a full load.

", + "OracleSettings$UseLogminerReader": "

Set this attribute to True to capture change data using the Oracle LogMiner utility (the default). Set this attribute to False if you want to access the redo logs as a binary file. When you set UseLogminerReader to False, also set UseBfile to True. For more information on this setting and using Oracle ASM, see Using Oracle LogMiner or DMS Binary Reader for CDC in the DMS User Guide.

", "OracleSettings$TrimSpaceInChar": "

Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and NCHAR data types during migration. The default value is true.

", "OracleSettings$ConvertTimestampWithZoneToUTC": "

When true, converts timestamps with the timezone datatype to their UTC value.

", - "PostgreSQLSettings$CaptureDdls": "

To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.

If this value is set to N, you don't have to create tables or triggers on the source database.

", - "PostgreSQLSettings$FailTasksOnLobTruncation": "

When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize.

If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.

", - "PostgreSQLSettings$HeartbeatEnable": "

The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.

", + "PostgreSQLSettings$CaptureDdls": "

To capture DDL events, DMS creates various artifacts in the PostgreSQL database when the task starts. You can later remove these artifacts.

The default value is true.

If this value is set to N, you don't have to create tables or triggers on the source database.

", + "PostgreSQLSettings$FailTasksOnLobTruncation": "

When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize.

The default value is false.

If task is set to Limited LOB mode and this option is set to true, the task fails instead of truncating the LOB data.

", + "PostgreSQLSettings$HeartbeatEnable": "

The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. This heartbeat keeps restart_lsn moving and prevents storage full scenarios.

The default value is false.

", "PostgreSQLSettings$TrimSpaceInChar": "

Use the TrimSpaceInChar source endpoint setting to trim data on CHAR and NCHAR data types during migration. The default value is true.

", - "PostgreSQLSettings$MapBooleanAsBoolean": "

When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as varchar(5). You must set this setting on both the source and target endpoints for it to take effect.

", - "PostgreSQLSettings$MapJsonbAsClob": "

When true, DMS migrates JSONB values as CLOB.

", + "PostgreSQLSettings$MapBooleanAsBoolean": "

When true, lets PostgreSQL migrate the boolean type as boolean. By default, PostgreSQL migrates booleans as varchar(5). You must set this setting on both the source and target endpoints for it to take effect.

The default value is false.

", + "PostgreSQLSettings$MapJsonbAsClob": "

When true, DMS migrates JSONB values as CLOB.

The default value is false.

", + "PostgreSQLSettings$DisableUnicodeSourceFilter": "

Disables the Unicode source filter with PostgreSQL, for values passed into the Selection rule filter on Source Endpoint column values. By default, DMS performs source filter comparisons using a Unicode string, which can cause lookups to ignore the indexes in the text columns and slow down migrations.

Unicode support should only be disabled when the selection rule filter is on a text column in the Source database that is indexed.
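A hedged sketch of applying this setting: modify_endpoint and PostgreSQLSettings already exist in boto3, while DisableUnicodeSourceFilter is the member added here, so an SDK built from this model is assumed; the endpoint ARN is a placeholder.

```python
import boto3

dms = boto3.client("dms")

# Turn off the Unicode source filter on a PostgreSQL source endpoint so a
# selection-rule filter on an indexed text column can use that index.
dms.modify_endpoint(
    EndpointArn="arn:aws:dms:us-east-1:111122223333:endpoint:PGEXAMPLE",  # placeholder
    PostgreSQLSettings={
        "DisableUnicodeSourceFilter": True,  # new setting from this model update
    },
)
```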

", "RebootReplicationInstanceMessage$ForceFailover": "

If this parameter is true, the reboot is conducted through a Multi-AZ failover. If the instance isn't configured for Multi-AZ, then you can't specify true. ( --force-planned-failover and --force-failover can't both be set to true.)

", "RebootReplicationInstanceMessage$ForcePlannedFailover": "

If this parameter is true, the reboot is conducted through a planned Multi-AZ failover where resources are released and cleaned up prior to conducting the failover. If the instance isn''t configured for Multi-AZ, then you can't specify true. ( --force-planned-failover and --force-failover can't both be set to true.)

", "Recommendation$Preferred": "

Indicates that this target is the rightsized migration destination.

", @@ -1559,7 +1562,7 @@ "DescribeCertificatesMessage$Filters": "

Filters applied to the certificates described in the form of key-value pairs. Valid values are certificate-arn and certificate-id.

", "DescribeConnectionsMessage$Filters": "

The filters applied to the connection.

Valid filter names: endpoint-arn | replication-instance-arn

", "DescribeDataMigrationsMessage$Filters": "

Filters applied to the data migrations.

", - "DescribeDataProvidersMessage$Filters": "

Filters applied to the data providers described in the form of key-value pairs.

Valid filter names: data-provider-identifier

", + "DescribeDataProvidersMessage$Filters": "

Filters applied to the data providers described in the form of key-value pairs.

Valid filter names and values: data-provider-identifier, with a data provider ARN or name as the value

", "DescribeEndpointTypesMessage$Filters": "

Filters applied to the endpoint types.

Valid filter names: engine-name | endpoint-type

", "DescribeEndpointsMessage$Filters": "

Filters applied to the endpoints.

Valid filter names: endpoint-arn | endpoint-type | endpoint-id | engine-name

", "DescribeEventCategoriesMessage$Filters": "

Filters applied to the event categories.

", @@ -1570,13 +1573,13 @@ "DescribeFleetAdvisorDatabasesRequest$Filters": "

If you specify any of the following filters, the output includes information for only those databases that meet the filter criteria:

An example is: describe-fleet-advisor-databases --filter Name=\"database-id\",Values=\"45\"

", "DescribeFleetAdvisorSchemaObjectSummaryRequest$Filters": "

If you specify any of the following filters, the output includes information for only those schema objects that meet the filter criteria:

Example: describe-fleet-advisor-schema-object-summary --filter Name=\"schema-id\",Values=\"50\"

", "DescribeFleetAdvisorSchemasRequest$Filters": "

If you specify any of the following filters, the output includes information for only those schemas that meet the filter criteria:

An example is: describe-fleet-advisor-schemas --filter Name=\"schema-id\",Values=\"50\"

", - "DescribeInstanceProfilesMessage$Filters": "

Filters applied to the instance profiles described in the form of key-value pairs.

", + "DescribeInstanceProfilesMessage$Filters": "

Filters applied to the instance profiles described in the form of key-value pairs.

Valid filter names and values: instance-profile-identifier, with an instance profile ARN or name as the value

", "DescribeMetadataModelAssessmentsMessage$Filters": "

Filters applied to the metadata model assessments described in the form of key-value pairs.

", "DescribeMetadataModelConversionsMessage$Filters": "

Filters applied to the metadata model conversions described in the form of key-value pairs.

", "DescribeMetadataModelExportsAsScriptMessage$Filters": "

Filters applied to the metadata model exports described in the form of key-value pairs.

", "DescribeMetadataModelExportsToTargetMessage$Filters": "

Filters applied to the metadata model exports described in the form of key-value pairs.

", "DescribeMetadataModelImportsMessage$Filters": "

Filters applied to the metadata model imports described in the form of key-value pairs.

", - "DescribeMigrationProjectsMessage$Filters": "

Filters applied to the migration projects described in the form of key-value pairs.

", + "DescribeMigrationProjectsMessage$Filters": "

Filters applied to the migration projects described in the form of key-value pairs.

Valid filter names and values:

", "DescribePendingMaintenanceActionsMessage$Filters": "

", "DescribeRecommendationLimitationsRequest$Filters": "

Filters applied to the limitations described in the form of key-value pairs.

", "DescribeRecommendationsRequest$Filters": "

Filters applied to the target engine recommendations described in the form of key-value pairs.

", @@ -1826,10 +1829,10 @@ "OracleSettings$Port": "

Endpoint TCP port.

", "OracleSettings$RetryInterval": "

Specifies the number of seconds that the system waits before resending a query.

Example: retryInterval=6;

", "OracleSettings$StandbyDelayTime": "

Use this attribute to specify a time in minutes for the delay in standby sync. If the source is an Oracle Active Data Guard standby database, use this attribute to specify the time lag between primary and standby databases.

In DMS, you can create an Oracle CDC task that uses an Active Data Guard standby instance as a source for replicating ongoing changes. Doing this eliminates the need to connect to an active database that might be in production.

", - "OracleSettings$OpenTransactionWindow": "

The timeframe in minutes to check for open transactions for a CDC-only task.

You can specify an integer value between 0 (the default) and 240 (the maximum).

This parameter is only valid in DMS version 3.5.0 and later. DMS supports a window of up to 9.5 hours including the value for OpenTransactionWindow.

", - "PostgreSQLSettings$MaxFileSize": "

Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.

Example: maxFileSize=512

", + "OracleSettings$OpenTransactionWindow": "

The timeframe in minutes to check for open transactions for a CDC-only task.

You can specify an integer value between 0 (the default) and 240 (the maximum).

This parameter is only valid in DMS version 3.5.0 and later.

", + "PostgreSQLSettings$MaxFileSize": "

Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL.

The default value is 32,768 KB (32 MB).

Example: maxFileSize=512

", "PostgreSQLSettings$ExecuteTimeout": "

Sets the client statement timeout for the PostgreSQL instance, in seconds. The default value is 60 seconds.

Example: executeTimeout=100;

", - "PostgreSQLSettings$HeartbeatFrequency": "

Sets the WAL heartbeat frequency (in minutes).

", + "PostgreSQLSettings$HeartbeatFrequency": "

Sets the WAL heartbeat frequency (in minutes).

The default value is 5 minutes.

", "PostgreSQLSettings$Port": "

Endpoint TCP port. The default is 5432.

", "PostgreSqlDataProviderSettings$Port": "

The port value for the PostgreSQL data provider.

", "RdsConfiguration$StorageSize": "

Describes the storage size of the recommended Amazon RDS DB instance that meets your requirements.

", @@ -1958,6 +1961,14 @@ "KafkaSettings$SslEndpointIdentificationAlgorithm": "

Sets hostname verification for the certificate. This setting is supported in DMS version 3.5.1 and later.

" } }, + "KerberosAuthenticationSettings": { + "base": "

Specifies the Kerberos authentication settings to use with DMS.

", + "refs": { + "CreateReplicationInstanceMessage$KerberosAuthenticationSettings": "

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication, when creating a replication instance.

", + "ModifyReplicationInstanceMessage$KerberosAuthenticationSettings": "

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication, when modifying a replication instance.

", + "ReplicationInstance$KerberosAuthenticationSettings": "

Specifies the ID of the secret that stores the key cache file required for Kerberos authentication for the replication instance.
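As a sketch of how these settings might be supplied, the call below uses the existing create_replication_instance operation; the KerberosAuthenticationSettings structure and its three members are the additions from this model, so an SDK regenerated from it is assumed, and every identifier shown is a placeholder.

```python
import boto3

dms = boto3.client("dms")

# Create a replication instance that authenticates with Kerberos using a
# key cache file stored in Secrets Manager (all identifiers are placeholders).
dms.create_replication_instance(
    ReplicationInstanceIdentifier="my-repl-instance",
    ReplicationInstanceClass="dms.c4.large",
    KerberosAuthenticationSettings={
        "KeyCacheSecretId": "my-krb5-keycache-secret",
        "KeyCacheSecretIamArn": "arn:aws:iam::111122223333:role/dms-secret-access",
        "Krb5FileContents": "<contents of krb5.conf>",
    },
)
```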

" + } + }, "KeyList": { "base": null, "refs": { @@ -2033,7 +2044,7 @@ "LongVarcharMappingType": { "base": null, "refs": { - "PostgreSQLSettings$MapLongVarcharAs": "

When true, DMS migrates LONG values as VARCHAR.

" + "PostgreSQLSettings$MapLongVarcharAs": "

Sets the data type that LONG values are mapped to.

The default value is wstring.

" } }, "MariaDbDataProviderSettings": { @@ -2264,6 +2275,12 @@ "MongoDbSettings$NestingLevel": "

Specifies either document or table mode.

Default value is \"none\". Specify \"none\" to use document mode. Specify \"one\" to use table mode.

" } }, + "OracleAuthenticationMethod": { + "base": null, + "refs": { + "OracleSettings$AuthenticationMethod": "

Specifies whether to use password or Kerberos authentication with Oracle.
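A minimal sketch, assuming an SDK built from this model: AuthenticationMethod is the new OracleSettings member, and per the enum added above its valid values are password and kerberos; the endpoint ARN is a placeholder.

```python
import boto3

dms = boto3.client("dms")

# Switch an Oracle source endpoint from password to Kerberos authentication.
# Valid AuthenticationMethod values are "password" and "kerberos".
dms.modify_endpoint(
    EndpointArn="arn:aws:dms:us-east-1:111122223333:endpoint:ORACLEEXAMPLE",  # placeholder
    OracleSettings={
        "AuthenticationMethod": "kerberos",
    },
)
```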

" + } + }, "OracleDataProviderSettings": { "base": "

Provides information that defines an Oracle data provider.

", "refs": { @@ -2324,7 +2341,7 @@ "PluginNameValue": { "base": null, "refs": { - "PostgreSQLSettings$PluginName": "

Specifies the plugin to use to create a replication slot.

" + "PostgreSQLSettings$PluginName": "

Specifies the plugin to use to create a replication slot.

The default value is pglogical.

" } }, "PostgreSQLSettings": { @@ -2544,6 +2561,16 @@ "ReplicationInstanceList$member": null } }, + "ReplicationInstanceClass": { + "base": null, + "refs": { + "CreateReplicationInstanceMessage$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Choosing the right DMS replication instance and Selecting the best size for a replication instance.
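For example, resizing an existing instance to that class with the existing modify_replication_instance operation might look like the following sketch (the ARN is a placeholder).

```python
import boto3

dms = boto3.client("dms")

# Resize an existing replication instance to the dms.c4.large class.
dms.modify_replication_instance(
    ReplicationInstanceArn="arn:aws:dms:us-east-1:111122223333:rep:EXAMPLE",  # placeholder
    ReplicationInstanceClass="dms.c4.large",
    ApplyImmediately=True,  # apply the class change now instead of at the next window
)
```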

", + "ModifyReplicationInstanceMessage$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

", + "OrderableReplicationInstance$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example, to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

", + "ReplicationInstance$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. It is a required parameter, although a default value is pre-selected in the DMS console.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

", + "ReplicationPendingModifiedValues$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

" + } + }, "ReplicationInstanceIpv6AddressList": { "base": null, "refs": { @@ -2833,6 +2860,7 @@ "PostgreSQLSettings$Password": "

Endpoint connection password.

", "RedisSettings$AuthPassword": "

The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.

", "RedshiftSettings$Password": "

The password for the user named in the username property.

", + "ReplicationTaskAssessmentResult$S3ObjectUrl": "

The URL of the S3 object containing the task assessment results.

The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.

", "SybaseSettings$Password": "

Endpoint connection password.

" } }, @@ -2871,6 +2899,12 @@ "Event$SourceType": "

The type of DMS resource that generates events.

Valid values: replication-instance | endpoint | replication-task

" } }, + "SqlServerAuthenticationMethod": { + "base": null, + "refs": { + "MicrosoftSQLServerSettings$AuthenticationMethod": "

Specifies whether to use password or Kerberos authentication with Microsoft SQL Server.

" + } + }, "SslSecurityProtocolValue": { "base": null, "refs": { @@ -3142,7 +3176,6 @@ "CreateReplicationConfigMessage$SupplementalSettings": "

Optional JSON settings for specifying supplemental data. For more information, see Specifying supplemental data for task settings.

", "CreateReplicationConfigMessage$ResourceIdentifier": "

Optional unique value or name that you set for a given resource that can be used to construct an Amazon Resource Name (ARN) for that resource. For more information, see Fine-grained access control using resource names and tags.

", "CreateReplicationInstanceMessage$ReplicationInstanceIdentifier": "

The replication instance identifier. This parameter is stored as a lowercase string.

Constraints:

Example: myrepinstance

", - "CreateReplicationInstanceMessage$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Choosing the right DMS replication instance; and, Selecting the best size for a replication instance.

", "CreateReplicationInstanceMessage$AvailabilityZone": "

The Availability Zone where the replication instance will be created. The default value is a random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region, for example: us-east-1d.

", "CreateReplicationInstanceMessage$ReplicationSubnetGroupIdentifier": "

A subnet group to associate with the replication instance.

", "CreateReplicationInstanceMessage$PreferredMaintenanceWindow": "

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

Default: A 30-minute window selected at random from an 8-hour block of time per Amazon Web Services Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun

Constraints: Minimum 30-minute window.

", @@ -3406,6 +3439,9 @@ "KafkaSettings$SslClientKeyArn": "

The Amazon Resource Name (ARN) for the client private key used to securely connect to a Kafka target endpoint.

", "KafkaSettings$SslCaCertificateArn": "

The Amazon Resource Name (ARN) for the private certificate authority (CA) cert that DMS uses to securely connect to your Kafka target endpoint.

", "KafkaSettings$SaslUsername": "

The secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.

", + "KerberosAuthenticationSettings$KeyCacheSecretId": "

Specifies the secret ID of the key cache for the replication instance.

", + "KerberosAuthenticationSettings$KeyCacheSecretIamArn": "

Specifies the Amazon Resource Name (ARN) of the IAM role that grants Amazon Web Services DMS access to the secret containing the key cache file for the replication instance.

", + "KerberosAuthenticationSettings$Krb5FileContents": "

Specifies the contents of the krb5 configuration file required for Kerberos authentication of the replication instance.

", "KeyList$member": null, "KinesisSettings$StreamArn": "

The Amazon Resource Name (ARN) for the Amazon Kinesis Data Streams endpoint.

", "KinesisSettings$ServiceAccessRoleArn": "

The Amazon Resource Name (ARN) for the IAM role that DMS uses to write to the Kinesis data stream. The role must allow the iam:PassRole action.

", @@ -3476,7 +3512,6 @@ "ModifyReplicationConfigMessage$SourceEndpointArn": "

The Amazon Resource Name (ARN) of the source endpoint for this DMS serverless replication configuration.

", "ModifyReplicationConfigMessage$TargetEndpointArn": "

The Amazon Resource Name (ARN) of the target endpoint for this DMS serverless replication configuration.

", "ModifyReplicationInstanceMessage$ReplicationInstanceArn": "

The Amazon Resource Name (ARN) of the replication instance.

", - "ModifyReplicationInstanceMessage$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

", "ModifyReplicationInstanceMessage$PreferredMaintenanceWindow": "

The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.

Default: Uses existing setting

Format: ddd:hh24:mi-ddd:hh24:mi

Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun

Constraints: Must be at least 30 minutes

", "ModifyReplicationInstanceMessage$EngineVersion": "

The engine version number of the replication instance.

When modifying a major engine version of an instance, also set AllowMajorVersionUpgrade to true.

", "ModifyReplicationInstanceMessage$ReplicationInstanceIdentifier": "

The replication instance identifier. This parameter is stored as a lowercase string.

", @@ -3539,15 +3574,14 @@ "OracleSettings$SecretsManagerOracleAsmAccessRoleArn": "

Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN of the IAM role that specifies DMS as the trusted entity and grants the required permissions to access the SecretsManagerOracleAsmSecret. This SecretsManagerOracleAsmSecret has the secret value that allows access to the Oracle ASM of the endpoint.

You can specify one of two sets of values for these permissions. You can specify the values for this setting and SecretsManagerOracleAsmSecretId. Or you can specify clear-text values for AsmUser, AsmPassword, and AsmServerName. You can't specify both. For more information on creating this SecretsManagerOracleAsmSecret and the SecretsManagerOracleAsmAccessRoleArn and SecretsManagerOracleAsmSecretId required to access it, see Using secrets to access Database Migration Service resources in the Database Migration Service User Guide.

", "OracleSettings$SecretsManagerOracleAsmSecretId": "

Required only if your Oracle endpoint uses Automatic Storage Management (ASM). The full ARN, partial ARN, or friendly name of the SecretsManagerOracleAsmSecret that contains the Oracle ASM connection details for the Oracle endpoint.

", "OrderableReplicationInstance$EngineVersion": "

The version of the replication engine.

", - "OrderableReplicationInstance$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. For example to specify the instance class dms.c4.large, set this parameter to \"dms.c4.large\".

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

", "OrderableReplicationInstance$StorageType": "

The type of storage used by the replication instance.

", "PendingMaintenanceAction$Action": "

The type of pending maintenance action that is available for the resource.

", "PendingMaintenanceAction$OptInStatus": "

The type of opt-in request that has been received for the resource.

", "PendingMaintenanceAction$Description": "

A description providing more detail about the maintenance action.

", "PostgreSQLSettings$AfterConnectScript": "

For use with change data capture (CDC) only, this attribute has DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.

Example: afterConnectScript=SET session_replication_role='replica'

", "PostgreSQLSettings$DatabaseName": "

Database name for the endpoint.

", - "PostgreSQLSettings$DdlArtifactsSchema": "

The schema in which the operational DDL database artifacts are created.

Example: ddlArtifactsSchema=xyzddlschema;

", - "PostgreSQLSettings$HeartbeatSchema": "

Sets the schema in which the heartbeat artifacts are created.

", + "PostgreSQLSettings$DdlArtifactsSchema": "

The schema in which the operational DDL database artifacts are created.

The default value is public.

Example: ddlArtifactsSchema=xyzddlschema;

", + "PostgreSQLSettings$HeartbeatSchema": "

Sets the schema in which the heartbeat artifacts are created.

The default value is public.

", "PostgreSQLSettings$ServerName": "

The host name of the endpoint database.

For an Amazon RDS PostgreSQL instance, this is the output of DescribeDBInstances, in the Endpoint.Address field.

For an Aurora PostgreSQL instance, this is the output of DescribeDBClusters, in the Endpoint field.

", "PostgreSQLSettings$Username": "

Endpoint connection user name.

", "PostgreSQLSettings$SlotName": "

Sets the name of a previously created logical replication slot for a change data capture (CDC) load of the PostgreSQL source instance.

When used with the CdcStartPosition request parameter for the DMS API , this attribute also makes it possible to use native CDC start points. DMS verifies that the specified logical replication slot exists before starting the CDC load task. It also verifies that the task was created with a valid setting of CdcStartPosition. If the specified slot doesn't exist or the task doesn't have a valid CdcStartPosition setting, DMS raises an error.

For more information about setting the CdcStartPosition request parameter, see Determining a CDC native start point in the Database Migration Service User Guide. For more information about using CdcStartPosition, see CreateReplicationTask, StartReplicationTask, and ModifyReplicationTask.

", @@ -3610,7 +3644,7 @@ "Replication$TargetEndpointArn": "

The Amazon Resource Name for an existing Endpoint the serverless replication uses for its data target.

", "Replication$Status": "

The current status of the serverless replication.

", "Replication$StopReason": "

The reason the replication task was stopped. This response parameter can return one of the following values:

", - "Replication$StartReplicationType": "

The replication type.

", + "Replication$StartReplicationType": "

The type of replication to start.

", "Replication$CdcStartPosition": "

Indicates the start time for a change data capture (CDC) operation. Use either CdcStartTime or CdcStartPosition to specify when you want a CDC operation to start. Specifying both values results in an error.

", "Replication$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

", "Replication$RecoveryCheckpoint": "

Indicates the last checkpoint that occurred during a change data capture (CDC) operation. You can provide this value to the CdcStartPosition parameter to start a CDC operation that begins at that checkpoint.

", @@ -3622,7 +3656,6 @@ "ReplicationConfig$SupplementalSettings": "

Additional parameters for an DMS serverless replication.

", "ReplicationConfig$TableMappings": "

Table mappings specified in the replication.

", "ReplicationInstance$ReplicationInstanceIdentifier": "

The replication instance identifier is a required parameter. This parameter is stored as a lowercase string.

Constraints:

Example: myrepinstance

", - "ReplicationInstance$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class. It is a required parameter, although a default value is pre-selected in the DMS console.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

", "ReplicationInstance$ReplicationInstanceStatus": "

The status of the replication instance. The possible return values include:

", "ReplicationInstance$AvailabilityZone": "

The Availability Zone for the instance.

", "ReplicationInstance$PreferredMaintenanceWindow": "

The maintenance window times for the replication instance. Any pending upgrades to the replication instance are performed during this time.

", @@ -3639,7 +3672,6 @@ "ReplicationInstancePublicIpAddressList$member": null, "ReplicationInstanceTaskLog$ReplicationTaskName": "

The name of the replication task.

", "ReplicationInstanceTaskLog$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", - "ReplicationPendingModifiedValues$ReplicationInstanceClass": "

The compute and memory capacity of the replication instance as defined for the specified replication instance class.

For more information on the settings and capacities for the available replication instance classes, see Selecting the right DMS replication instance for your migration.

", "ReplicationPendingModifiedValues$EngineVersion": "

The engine version number of the replication instance.

", "ReplicationPendingModifiedValues$NetworkType": "

The type of IP address protocol used by a replication instance, such as IPv4 only or Dual-stack that supports both IPv4 and IPv6 addressing. IPv6 only is not yet supported.

", "ReplicationSubnetGroup$ReplicationSubnetGroupIdentifier": "

The identifier of the replication instance subnet group.

", @@ -3654,7 +3686,7 @@ "ReplicationTask$ReplicationTaskSettings": "

The settings for the replication task.

", "ReplicationTask$Status": "

The status of the replication task. This response parameter can return one of the following values:

", "ReplicationTask$LastFailureMessage": "

The last error (failure) message generated for the replication task.

", - "ReplicationTask$StopReason": "

The reason the replication task was stopped. This response parameter can return one of the following values:

", + "ReplicationTask$StopReason": "

The reason the replication task was stopped. This response parameter can return one of the following values:

", "ReplicationTask$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want the CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

Date Example: --cdc-start-position “2018-03-08T12:12:12”

Checkpoint Example: --cdc-start-position \"checkpoint:V1#27#mysql-bin-changelog.157832:1975:-1:2002:677883278264080:mysql-bin-changelog.157832:1876#0#0#*#0#93\"

LSN Example: --cdc-start-position “mysql-bin-changelog.000024:373”

", "ReplicationTask$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12”

Commit time example: --cdc-stop-position “commit_time:2018-02-09T12:12:12“

", "ReplicationTask$RecoveryCheckpoint": "

Indicates the last checkpoint that occurred during a change data capture (CDC) operation. You can provide this value to the CdcStartPosition parameter to start a CDC operation that begins at that checkpoint.

", @@ -3666,10 +3698,9 @@ "ReplicationTaskAssessmentResult$AssessmentStatus": "

The status of the task assessment.

", "ReplicationTaskAssessmentResult$AssessmentResultsFile": "

The file containing the results of the task assessment.

", "ReplicationTaskAssessmentResult$AssessmentResults": "

The task assessment results in JSON format.

The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.

", - "ReplicationTaskAssessmentResult$S3ObjectUrl": "

The URL of the S3 object containing the task assessment results.

The response object only contains this field if you provide DescribeReplicationTaskAssessmentResultsMessage$ReplicationTaskArn in the request.

", "ReplicationTaskAssessmentRun$ReplicationTaskAssessmentRunArn": "

Amazon Resource Name (ARN) of this assessment run.

", "ReplicationTaskAssessmentRun$ReplicationTaskArn": "

ARN of the migration task associated with this premigration assessment run.

", - "ReplicationTaskAssessmentRun$Status": "

Assessment run status.

This status can have one of the following values:

", + "ReplicationTaskAssessmentRun$Status": "

Assessment run status.

This status can have one of the following values:

", "ReplicationTaskAssessmentRun$LastFailureMessage": "

Last message generated by an individual assessment failure.

", "ReplicationTaskAssessmentRun$ServiceAccessRoleArn": "

ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun operation. The role must allow the iam:PassRole action.

", "ReplicationTaskAssessmentRun$ResultLocationBucket": "

Amazon S3 bucket where DMS stores the results of this assessment run.

", @@ -3739,7 +3770,7 @@ "StartRecommendationsRequest$DatabaseId": "

The identifier of the source database to analyze and provide recommendations for.

", "StartRecommendationsRequestEntry$DatabaseId": "

The identifier of the source database.

", "StartReplicationMessage$ReplicationConfigArn": "

The Amazon Resource Name of the replication for which to start replication.

", - "StartReplicationMessage$StartReplicationType": "

The replication type.

", + "StartReplicationMessage$StartReplicationType": "

The replication type.

When the replication type is full-load or full-load-and-cdc, the only valid value for the first run of the replication is start-replication. This option will start the replication.

You can also use ReloadTables to reload specific tables that failed during replication instead of restarting the replication.

The resume-processing option isn't applicable for a full-load replication, because you can't resume partially loaded tables during the full load phase.

For a full-load-and-cdc replication, DMS migrates table data, and then applies data changes that occur on the source. To load all the tables again and start capturing source changes, use reload-target. Otherwise, use resume-processing to replicate the changes from the last stop position.
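A short sketch of these start types with the existing start_replication operation; the replication config ARN is a placeholder, and the type strings are the values described above.

```python
import boto3

dms = boto3.client("dms")

# First run of a full-load or full-load-and-cdc serverless replication:
# the only valid type is "start-replication".
dms.start_replication(
    ReplicationConfigArn="arn:aws:dms:us-east-1:111122223333:replication-config:EXAMPLE",
    StartReplicationType="start-replication",
)

# Later, for a full-load-and-cdc replication, resume change capture from the
# last stop position ("reload-target" would instead reload all tables).
dms.start_replication(
    ReplicationConfigArn="arn:aws:dms:us-east-1:111122223333:replication-config:EXAMPLE",
    StartReplicationType="resume-processing",
)
```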

", "StartReplicationMessage$CdcStartPosition": "

Indicates when you want a change data capture (CDC) operation to start. Use either CdcStartPosition or CdcStartTime to specify when you want a CDC operation to start. Specifying both values results in an error.

The value can be in date, checkpoint, or LSN/SCN format.

", "StartReplicationMessage$CdcStopPosition": "

Indicates when you want a change data capture (CDC) operation to stop. The value can be either server time or commit time.

", "StartReplicationTaskAssessmentMessage$ReplicationTaskArn": "

The Amazon Resource Name (ARN) of the replication task.

", diff --git a/apis/glue/2017-03-31/api-2.json b/apis/glue/2017-03-31/api-2.json index 3cad712a5b4..98088b21980 100644 --- a/apis/glue/2017-03-31/api-2.json +++ b/apis/glue/2017-03-31/api-2.json @@ -7278,6 +7278,20 @@ "max":2000, "min":0 }, + "DataQualityEncryption":{ + "type":"structure", + "members":{ + "DataQualityEncryptionMode":{"shape":"DataQualityEncryptionMode"}, + "KmsKeyArn":{"shape":"KmsKeyArn"} + } + }, + "DataQualityEncryptionMode":{ + "type":"string", + "enum":[ + "DISABLED", + "SSE-KMS" + ] + }, "DataQualityEvaluationRunAdditionalRunOptions":{ "type":"structure", "members":{ @@ -8496,7 +8510,8 @@ "members":{ "S3Encryption":{"shape":"S3EncryptionList"}, "CloudWatchEncryption":{"shape":"CloudWatchEncryption"}, - "JobBookmarksEncryption":{"shape":"JobBookmarksEncryption"} + "JobBookmarksEncryption":{"shape":"JobBookmarksEncryption"}, + "DataQualityEncryption":{"shape":"DataQualityEncryption"} } }, "Entity":{ diff --git a/apis/glue/2017-03-31/docs-2.json b/apis/glue/2017-03-31/docs-2.json index 98fddbf2cd2..6c4b5f6829d 100644 --- a/apis/glue/2017-03-31/docs-2.json +++ b/apis/glue/2017-03-31/docs-2.json @@ -49,7 +49,7 @@ "CreateSession": "

Creates a new session.

", "CreateTable": "

Creates a new table definition in the Data Catalog.

", "CreateTableOptimizer": "

Creates a new table optimizer for a specific function.

", - "CreateTrigger": "

Creates a new trigger.

", + "CreateTrigger": "

Creates a new trigger.

Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job.
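A minimal sketch of that guidance (trigger, job, and secret names are hypothetical): pass only the name of a Secrets Manager secret as a job argument and let the job resolve it at run time, rather than passing the secret value itself:

require 'aws-sdk-glue'

glue = Aws::Glue::Client.new

glue.create_trigger(
  name: 'nightly-load',
  type: 'SCHEDULED',
  schedule: 'cron(0 2 * * ? *)',
  start_on_creation: true,
  actions: [{
    job_name: 'load-orders',
    # Arguments may be logged, so pass a reference to the secret, never its value.
    arguments: { '--db_secret_name' => 'prod/orders/db-credentials' }
  }]
)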

", "CreateUsageProfile": "

Creates a Glue usage profile.

", "CreateUserDefinedFunction": "

Creates a new function definition in the Data Catalog.

", "CreateWorkflow": "

Creates a new workflow.

", @@ -124,8 +124,8 @@ "GetIntegrationTableProperties": "

This API is used to retrieve optional override properties for the tables that need to be replicated. These properties can include filtering and partitioning settings for source and target tables.

", "GetJob": "

Retrieves an existing job definition.

", "GetJobBookmark": "

Returns information on a job bookmark entry.

For more information about enabling and using job bookmarks, see:

", - "GetJobRun": "

Retrieves the metadata for a given job run. Job run history is accessible for 90 days for your workflow and job run.

", - "GetJobRuns": "

Retrieves metadata for all runs of a given job definition.

", + "GetJobRun": "

Retrieves the metadata for a given job run. Job run history is accessible for 365 days for your workflow and job run.

", + "GetJobRuns": "

Retrieves metadata for all runs of a given job definition.

GetJobRuns returns the job runs in chronological order, with the newest jobs returned first.
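For example (the job name is assumed), the newest-first ordering means the first page already holds the most recent runs; older history can be walked with the next token:

require 'aws-sdk-glue'

glue = Aws::Glue::Client.new

params = { job_name: 'load-orders', max_results: 50 }
loop do
  resp = glue.get_job_runs(params)
  resp.job_runs.each do |run|
    puts "#{run.id} #{run.job_run_state} started #{run.started_on}"
  end
  break unless resp.next_token
  params[:next_token] = resp.next_token
end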

", "GetJobs": "

Retrieves all current job definitions.

", "GetMLTaskRun": "

Gets details for a specific task run on a machine learning transform. Machine learning task runs are asynchronous tasks that Glue runs on your behalf as part of various machine learning workflows. You can check the stats of any task run by calling GetMLTaskRun with the TaskRunID and its parent transform's TransformID.

", "GetMLTaskRuns": "

Gets a list of runs for a machine learning transform. Machine learning task runs are asynchronous tasks that Glue runs on your behalf as part of various machine learning workflows. You can get a sortable, filterable list of machine learning task runs by calling GetMLTaskRuns with their parent transform's TransformID and other optional parameters as documented in this section.

This operation returns a list of historic runs and must be paginated.

", @@ -251,7 +251,7 @@ "UpdateSourceControlFromJob": "

Synchronizes a job to the source control repository. This operation takes the job artifacts from the Glue internal stores and makes a commit to the remote repository that is configured on the job.

This API supports optional parameters which take in the repository information.

", "UpdateTable": "

Updates a metadata table in the Data Catalog.

", "UpdateTableOptimizer": "

Updates the configuration for an existing table optimizer.

", - "UpdateTrigger": "

Updates a trigger definition.

", + "UpdateTrigger": "

Updates a trigger definition.

Job arguments may be logged. Do not pass plaintext secrets as arguments. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to keep them within the Job.

", "UpdateUsageProfile": "

Updates a Glue usage profile.

", "UpdateUserDefinedFunction": "

Updates an existing function definition in the Data Catalog.

", "UpdateWorkflow": "

Updates an existing workflow.

" @@ -2744,6 +2744,18 @@ "GetDataQualityResultResponse$AnalyzerResults": "

A list of DataQualityAnalyzerResult objects representing the results for each analyzer.

" } }, + "DataQualityEncryption": { + "base": "

Specifies how Data Quality assets in your account should be encrypted.

", + "refs": { + "EncryptionConfiguration$DataQualityEncryption": "

The encryption configuration for Glue Data Quality assets.

" + } + }, + "DataQualityEncryptionMode": { + "base": null, + "refs": { + "DataQualityEncryption$DataQualityEncryptionMode": "

The encryption mode to use for encrypting Data Quality assets. These assets include data quality rulesets, results, statistics, anomaly detection models and observations.

Valid values are SSE-KMS for encryption using a customer-managed KMS key, or DISABLED.
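A sketch of wiring the new member into a security configuration with the Ruby SDK; the configuration name and KMS key ARN are placeholders:

require 'aws-sdk-glue'

glue = Aws::Glue::Client.new

glue.create_security_configuration(
  name: 'dq-encrypted',
  encryption_configuration: {
    s3_encryption: [{
      s3_encryption_mode: 'SSE-KMS',
      kms_key_arn: 'arn:aws:kms:us-east-1:123456789012:key/EXAMPLE'
    }],
    # New in this change: encrypt Data Quality rulesets, results, statistics,
    # anomaly detection models and observations with the same customer-managed key.
    data_quality_encryption: {
      data_quality_encryption_mode: 'SSE-KMS',
      kms_key_arn: 'arn:aws:kms:us-east-1:123456789012:key/EXAMPLE'
    }
  }
)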

" + } + }, "DataQualityEvaluationRunAdditionalRunOptions": { "base": "

Additional run options you can specify for an evaluation run.

", "refs": { @@ -6167,6 +6179,7 @@ "refs": { "AuthenticationConfigurationInput$KmsKeyArn": "

The ARN of the KMS key used to encrypt the connection. Only taken as an input in the request and stored in Secrets Manager.

", "CloudWatchEncryption$KmsKeyArn": "

The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.

", + "DataQualityEncryption$KmsKeyArn": "

The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.

", "JobBookmarksEncryption$KmsKeyArn": "

The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.

", "S3Encryption$KmsKeyArn": "

The Amazon Resource Name (ARN) of the KMS key to be used to encrypt the data.

" } @@ -11367,17 +11380,17 @@ "refs": { "CreateDevEndpointRequest$WorkerType": "

The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.

Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.

", "CreateDevEndpointResponse$WorkerType": "

The type of predefined worker that is allocated to the development endpoint. May be a value of Standard, G.1X, or G.2X.

", - "CreateJobRequest$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", + "CreateJobRequest$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", "CreateMLTransformRequest$WorkerType": "

The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.

MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.

", - "CreateSessionRequest$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.

", + "CreateSessionRequest$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks.

", "DevEndpoint$WorkerType": "

The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.

Known issue: when a development endpoint is created with the G.2X WorkerType configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.

", "GetMLTransformResponse$WorkerType": "

The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.

", - "Job$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", - "JobRun$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", - "JobUpdate$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", + "Job$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", + "JobRun$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", + "JobUpdate$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", "MLTransform$WorkerType": "

The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.

MaxCapacity is a mutually exclusive option with NumberOfWorkers and WorkerType.

", "Session$WorkerType": "

The type of predefined worker that is allocated when a session runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark sessions. Accepts the value Z.2X for Ray sessions.

", - "StartJobRunRequest$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.

", + "StartJobRunRequest$WorkerType": "

The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs.
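For example (the job name is assumed), a single run can request a larger worker type than the one stored on the job definition:

require 'aws-sdk-glue'

glue = Aws::Glue::Client.new

resp = glue.start_job_run(
  job_name: 'load-orders',
  worker_type: 'G.4X',       # Spark jobs accept G.1X, G.2X, G.4X, G.8X or G.025X; Ray jobs accept Z.2X
  number_of_workers: 10
)
puts resp.job_run_id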

", "UpdateMLTransformRequest$WorkerType": "

The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.

" } }, @@ -11414,11 +11427,11 @@ "WorkflowRunProperties": { "base": null, "refs": { - "CreateWorkflowRequest$DefaultRunProperties": "

A collection of properties to be used as part of each execution of the workflow.

", + "CreateWorkflowRequest$DefaultRunProperties": "

A collection of properties to be used as part of each execution of the workflow.

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

", "GetWorkflowRunPropertiesResponse$RunProperties": "

The workflow run properties which were set during the specified run.

", - "PutWorkflowRunPropertiesRequest$RunProperties": "

The properties to put for the specified run.

", - "StartWorkflowRunRequest$RunProperties": "

The workflow run properties for the new workflow run.

", - "UpdateWorkflowRequest$DefaultRunProperties": "

A collection of properties to be used as part of each execution of the workflow.

", + "PutWorkflowRunPropertiesRequest$RunProperties": "

The properties to put for the specified run.

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

", + "StartWorkflowRunRequest$RunProperties": "

The workflow run properties for the new workflow run.

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.
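As with job arguments, a hedged sketch (workflow and secret names are hypothetical): pass a pointer to the secret as a run property and resolve it inside the workflow's jobs:

require 'aws-sdk-glue'

glue = Aws::Glue::Client.new

resp = glue.start_workflow_run(
  name: 'orders-etl',
  # Run properties may be logged, so store only the secret's name here.
  run_properties: { 'db_secret_name' => 'prod/orders/db-credentials' }
)
puts resp.run_id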

", + "UpdateWorkflowRequest$DefaultRunProperties": "

A collection of properties to be used as part of each execution of the workflow.

Run properties may be logged. Do not pass plaintext secrets as properties. Retrieve secrets from a Glue Connection, Amazon Web Services Secrets Manager or other secret management mechanism if you intend to use them within the workflow run.

", "Workflow$DefaultRunProperties": "

A collection of properties to be used as part of each execution of the workflow. The run properties are made available to each job in the workflow. A job can modify the properties for the next jobs in the flow.

", "WorkflowRun$WorkflowRunProperties": "

The workflow run properties which were set during the run.

" } diff --git a/apis/guardduty/2017-11-28/docs-2.json b/apis/guardduty/2017-11-28/docs-2.json index 996419f9415..8d0f796be1f 100644 --- a/apis/guardduty/2017-11-28/docs-2.json +++ b/apis/guardduty/2017-11-28/docs-2.json @@ -393,7 +393,7 @@ "ThreatDetectedByName$Shortened": "

Flag to determine if the finding contains every single infected file-path and/or every threat.

", "UpdateDetectorRequest$Enable": "

Specifies whether the detector is enabled or not enabled.

", "UpdateIPSetRequest$Activate": "

The updated Boolean value that specifies whether the IPSet is active or not.

", - "UpdateOrganizationConfigurationRequest$AutoEnable": "

Represents whether or not to automatically enable member accounts in the organization.

Even though this is still supported, we recommend using AutoEnableOrganizationMembers to achieve the similar results. You must provide a value for either autoEnableOrganizationMembers or autoEnable.

", + "UpdateOrganizationConfigurationRequest$AutoEnable": "

Represents whether to automatically enable member accounts in the organization. This applies only to new member accounts, not to existing member accounts. When a new account joins the organization, the chosen features will be enabled for them by default.

Even though this is still supported, we recommend using AutoEnableOrganizationMembers to achieve similar results. You must provide a value for either autoEnableOrganizationMembers or autoEnable.
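A sketch of the recommended path (the detector ID is a placeholder): set AutoEnableOrganizationMembers rather than the legacy AutoEnable flag:

require 'aws-sdk-guardduty'

guardduty = Aws::GuardDuty::Client.new

guardduty.update_organization_configuration(
  detector_id: '12abc34d567e8fa901bc2d34eexample',
  # NEW enables GuardDuty only for accounts that join the organization after
  # this call; ALL and NONE are also accepted.
  auto_enable_organization_members: 'NEW'
)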

", "UpdateThreatIntelSetRequest$Activate": "

The updated Boolean value that specifies whether the ThreatIntelSet is active or not.

" } }, @@ -1030,7 +1030,7 @@ "ListPublishingDestinationsRequest$DetectorId": "

The detector ID for which you want to retrieve the publishing destination.

To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.

", "ListThreatIntelSetsRequest$DetectorId": "

The unique ID of the detector that is associated with the threatIntelSet.

To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.

", "Member$DetectorId": "

The detector ID of the member account.

", - "Scan$DetectorId": "

The unique ID of the detector that the request is associated with.

To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.

", + "Scan$DetectorId": "

The unique ID of the detector that is associated with the request.

To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.

", "Scan$AdminDetectorId": "

The unique detector ID of the administrator account that the request is associated with. If the account is an administrator, the AdminDetectorId will be the same as the one used for DetectorId.

To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.

", "Service$DetectorId": "

The detector ID for the GuardDuty service.

", "StartMonitoringMembersRequest$DetectorId": "

The unique ID of the detector of the GuardDuty administrator account associated with the member accounts to monitor.

To find the detectorId in the current Region, see the Settings page in the GuardDuty console, or run the ListDetectors API.

", @@ -1339,7 +1339,7 @@ "FindingCriteria": { "base": "

Contains information about the criteria used for querying findings.

", "refs": { - "CreateFilterRequest$FindingCriteria": "

Represents the criteria to be used in the filter for querying findings.

You can only use the following attributes to query findings:

", + "CreateFilterRequest$FindingCriteria": "

Represents the criteria to be used in the filter for querying findings.

You can only use the following attributes to query findings:

", "GetFilterResponse$FindingCriteria": "

Represents the criteria to be used in the filter for querying findings.

", "GetFindingsStatisticsRequest$FindingCriteria": "

Represents the criteria that is used for querying findings.

", "ListFindingsRequest$FindingCriteria": "

Represents the criteria used for querying findings. Valid values include:

", @@ -2426,7 +2426,7 @@ "refs": { "FilterCondition$EqualsValue": "

Represents an equal condition to be applied to a single field when querying for scan entries.

", "Scan$ScanId": "

The unique scan ID associated with a scan entry.

", - "Scan$FailureReason": "

Represents the reason for FAILED scan status.

", + "Scan$FailureReason": "

Represents the reason for FAILED scan status.

", "StartMalwareScanResponse$ScanId": "

A unique identifier that gets generated when you invoke the API without any error. Each malware scan has a corresponding scan ID. Using this scan ID, you can monitor the status of your malware scan.

", "TriggerDetails$GuardDutyFindingId": "

The ID of the GuardDuty finding that triggered the malware scan.

", "TriggerDetails$Description": "

The description of the scan trigger.

" @@ -2474,8 +2474,8 @@ "refs": { "MemberAdditionalConfiguration$Name": "

Name of the additional configuration.

", "MemberAdditionalConfigurationResult$Name": "

Indicates the name of the additional configuration that is set for the member account.

", - "OrganizationAdditionalConfiguration$Name": "

The name of the additional configuration that will be configured for the organization.

", - "OrganizationAdditionalConfigurationResult$Name": "

The name of the additional configuration that is configured for the member accounts within the organization.

", + "OrganizationAdditionalConfiguration$Name": "

The name of the additional configuration that will be configured for the organization. These values are applicable only to the Runtime Monitoring protection plan.

", + "OrganizationAdditionalConfigurationResult$Name": "

The name of the additional configuration that is configured for the member accounts within the organization. These values are applicable only to the Runtime Monitoring protection plan.

", "OrganizationFeatureStatisticsAdditionalConfiguration$Name": "

Name of the additional configuration within a feature.

" } }, @@ -2495,7 +2495,7 @@ } }, "OrganizationAdditionalConfiguration": { - "base": "

A list of additional configurations which will be configured for the organization.

", + "base": "

A list of additional configurations which will be configured for the organization.

Additional configuration applies only to the GuardDuty Runtime Monitoring protection plan.

", "refs": { "OrganizationAdditionalConfigurations$member": null } @@ -2965,7 +2965,7 @@ } }, "Scan": { - "base": "

Contains information about a malware scan.

", + "base": "

Contains information about malware scans associated with GuardDuty Malware Protection for EC2.

", "refs": { "Scans$member": null } @@ -3072,7 +3072,7 @@ "Scans": { "base": null, "refs": { - "DescribeMalwareScansResponse$Scans": "

Contains information about malware scans.

" + "DescribeMalwareScansResponse$Scans": "

Contains information about malware scans associated with GuardDuty Malware Protection for EC2.
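For example (the detector ID is a placeholder), the scan entries for Malware Protection for EC2 can be listed roughly like this:

require 'aws-sdk-guardduty'

guardduty = Aws::GuardDuty::Client.new

resp = guardduty.describe_malware_scans(
  detector_id: '12abc34d567e8fa901bc2d34eexample',
  max_results: 25
)
resp.scans.each do |scan|
  puts "#{scan.scan_id} #{scan.scan_status} #{scan.failure_reason}"
end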

" } }, "SecurityContext": { diff --git a/apis/route53domains/2014-05-15/api-2.json b/apis/route53domains/2014-05-15/api-2.json index e20e9f40a8e..4d2eb5cb2d0 100644 --- a/apis/route53domains/2014-05-15/api-2.json +++ b/apis/route53domains/2014-05-15/api-2.json @@ -1353,7 +1353,7 @@ }, "LangCode":{ "type":"string", - "max":3 + "pattern":"|[A-Za-z]{2,3}" }, "ListDomainsAttributeName":{ "type":"string", @@ -1512,13 +1512,14 @@ "INTERNAL_TRANSFER_OUT_DOMAIN", "INTERNAL_TRANSFER_IN_DOMAIN", "RELEASE_TO_GANDI", - "TRANSFER_ON_RENEW" + "TRANSFER_ON_RENEW", + "RESTORE_DOMAIN" ] }, "OperationTypeList":{ "type":"list", "member":{"shape":"OperationType"}, - "max":20 + "max":21 }, "Operator":{ "type":"string", @@ -1540,7 +1541,10 @@ "type":"string", "sensitive":true }, - "Price":{"type":"double"}, + "Price":{ + "type":"double", + "min":0.0 + }, "PriceWithCurrency":{ "type":"structure", "required":[ diff --git a/apis/route53domains/2014-05-15/paginators-1.json b/apis/route53domains/2014-05-15/paginators-1.json index 739710b8046..b97198260e0 100644 --- a/apis/route53domains/2014-05-15/paginators-1.json +++ b/apis/route53domains/2014-05-15/paginators-1.json @@ -2,27 +2,27 @@ "pagination": { "ListDomains": { "input_token": "Marker", - "limit_key": "MaxItems", "output_token": "NextPageMarker", + "limit_key": "MaxItems", "result_key": "Domains" }, "ListOperations": { "input_token": "Marker", - "limit_key": "MaxItems", "output_token": "NextPageMarker", + "limit_key": "MaxItems", "result_key": "Operations" }, "ListPrices": { "input_token": "Marker", - "limit_key": "MaxItems", "output_token": "NextPageMarker", + "limit_key": "MaxItems", "result_key": "Prices" }, "ViewBilling": { "input_token": "Marker", - "limit_key": "MaxItems", "output_token": "NextPageMarker", + "limit_key": "MaxItems", "result_key": "BillingRecords" } } -} \ No newline at end of file +} diff --git a/gems/aws-partitions/CHANGELOG.md b/gems/aws-partitions/CHANGELOG.md index f588e2fb826..5efe9012c49 100644 --- a/gems/aws-partitions/CHANGELOG.md +++ b/gems/aws-partitions/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.1022.0 (2024-12-12) +------------------ + +* Feature - Updated the partitions source data the determines the AWS service regions and endpoints. 
+ 1.1021.0 (2024-12-11) ------------------ diff --git a/gems/aws-partitions/VERSION b/gems/aws-partitions/VERSION index df7bb5d43d5..b1e4c58a095 100644 --- a/gems/aws-partitions/VERSION +++ b/gems/aws-partitions/VERSION @@ -1 +1 @@ -1.1021.0 +1.1022.0 diff --git a/gems/aws-partitions/partitions.json b/gems/aws-partitions/partitions.json index 93ffb541d3c..80f78bc40c0 100644 --- a/gems/aws-partitions/partitions.json +++ b/gems/aws-partitions/partitions.json @@ -14403,6 +14403,7 @@ "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ap-southeast-4" : { }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "network-firewall-fips.ca-central-1.amazonaws.com", @@ -31875,6 +31876,18 @@ "us-isob-east-1" : { } } }, + "organizations" : { + "endpoints" : { + "aws-iso-b-global" : { + "credentialScope" : { + "region" : "us-isob-east-1" + }, + "hostname" : "organizations.us-isob-east-1.sc2s.sgov.gov" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-iso-b-global" + }, "outposts" : { "endpoints" : { "us-isob-east-1" : { } diff --git a/gems/aws-sdk-connect/CHANGELOG.md b/gems/aws-sdk-connect/CHANGELOG.md index 26093796645..e74eb7ae2ae 100644 --- a/gems/aws-sdk-connect/CHANGELOG.md +++ b/gems/aws-sdk-connect/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.190.0 (2024-12-12) +------------------ + +* Feature - Configure holidays and other overrides to hours of operation in advance. During contact handling, Amazon Connect automatically checks for overrides and provides customers with an appropriate flow path. After an override period passes call center automatically reverts to standard hours of operation. + 1.189.0 (2024-12-10) ------------------ diff --git a/gems/aws-sdk-connect/VERSION b/gems/aws-sdk-connect/VERSION index 5367967643e..8d5d795d9e2 100644 --- a/gems/aws-sdk-connect/VERSION +++ b/gems/aws-sdk-connect/VERSION @@ -1 +1 @@ -1.189.0 +1.190.0 diff --git a/gems/aws-sdk-connect/lib/aws-sdk-connect.rb b/gems/aws-sdk-connect/lib/aws-sdk-connect.rb index baa79f88f49..ae2d9022eb0 100644 --- a/gems/aws-sdk-connect/lib/aws-sdk-connect.rb +++ b/gems/aws-sdk-connect/lib/aws-sdk-connect.rb @@ -54,7 +54,7 @@ module Plugins autoload :EndpointProvider, 'aws-sdk-connect/endpoint_provider' autoload :Endpoints, 'aws-sdk-connect/endpoints' - GEM_VERSION = '1.189.0' + GEM_VERSION = '1.190.0' end diff --git a/gems/aws-sdk-connect/lib/aws-sdk-connect/client.rb b/gems/aws-sdk-connect/lib/aws-sdk-connect/client.rb index fbdd1d5db6f..71d2d2cdad8 100644 --- a/gems/aws-sdk-connect/lib/aws-sdk-connect/client.rb +++ b/gems/aws-sdk-connect/lib/aws-sdk-connect/client.rb @@ -2294,6 +2294,73 @@ def create_hours_of_operation(params = {}, options = {}) req.send_request(options) end + # Creates an hours of operation override in an Amazon Connect hours of + # operation resource + # + # @option params [required, String] :instance_id + # The identifier of the Amazon Connect instance. + # + # @option params [required, String] :hours_of_operation_id + # The identifier for the hours of operation + # + # @option params [required, String] :name + # The name of the hours of operation override. + # + # @option params [String] :description + # The description of the hours of operation override. + # + # @option params [required, Array] :config + # Configuration information for the hours of operation override: day, + # start time, and end time. + # + # @option params [required, String] :effective_from + # The date from when the hours of operation override would be effective. 
+ # + # @option params [required, String] :effective_till + # The date until when the hours of operation override would be + # effective. + # + # @return [Types::CreateHoursOfOperationOverrideResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::CreateHoursOfOperationOverrideResponse#hours_of_operation_override_id #hours_of_operation_override_id} => String + # + # @example Request syntax with placeholder values + # + # resp = client.create_hours_of_operation_override({ + # instance_id: "InstanceId", # required + # hours_of_operation_id: "HoursOfOperationId", # required + # name: "CommonHumanReadableName", # required + # description: "CommonHumanReadableDescription", + # config: [ # required + # { + # day: "SUNDAY", # accepts SUNDAY, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY + # start_time: { + # hours: 1, # required + # minutes: 1, # required + # }, + # end_time: { + # hours: 1, # required + # minutes: 1, # required + # }, + # }, + # ], + # effective_from: "HoursOfOperationOverrideYearMonthDayDateFormat", # required + # effective_till: "HoursOfOperationOverrideYearMonthDayDateFormat", # required + # }) + # + # @example Response structure + # + # resp.hours_of_operation_override_id #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/CreateHoursOfOperationOverride AWS API Documentation + # + # @overload create_hours_of_operation_override(params = {}) + # @param [Hash] params ({}) + def create_hours_of_operation_override(params = {}, options = {}) + req = build_request(:create_hours_of_operation_override, params) + req.send_request(options) + end + # This API is in preview release for Amazon Connect and is subject to # change. # @@ -2820,9 +2887,6 @@ def create_push_notification_registration(params = {}, options = {}) req.send_request(options) end - # This API is in preview release for Amazon Connect and is subject to - # change. - # # Creates a new queue for the specified Amazon Connect instance. # # * If the phone number is claimed to a traffic distribution group that @@ -4316,6 +4380,37 @@ def delete_hours_of_operation(params = {}, options = {}) req.send_request(options) end + # Deletes an hours of operation override in an Amazon Connect hours of + # operation resource + # + # @option params [required, String] :instance_id + # The identifier of the Amazon Connect instance. + # + # @option params [required, String] :hours_of_operation_id + # The identifier for the hours of operation. + # + # @option params [required, String] :hours_of_operation_override_id + # The identifier for the hours of operation override. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.delete_hours_of_operation_override({ + # instance_id: "InstanceId", # required + # hours_of_operation_id: "HoursOfOperationId", # required + # hours_of_operation_override_id: "HoursOfOperationOverrideId", # required + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DeleteHoursOfOperationOverride AWS API Documentation + # + # @overload delete_hours_of_operation_override(params = {}) + # @param [Hash] params ({}) + def delete_hours_of_operation_override(params = {}, options = {}) + req = build_request(:delete_hours_of_operation_override, params) + req.send_request(options) + end + # This API is in preview release for Amazon Connect and is subject to # change. 
# @@ -5588,6 +5683,54 @@ def describe_hours_of_operation(params = {}, options = {}) req.send_request(options) end + # Describes the hours of operation override. + # + # @option params [required, String] :instance_id + # The identifier of the Amazon Connect instance. + # + # @option params [required, String] :hours_of_operation_id + # The identifier for the hours of operation. + # + # @option params [required, String] :hours_of_operation_override_id + # The identifier for the hours of operation override. + # + # @return [Types::DescribeHoursOfOperationOverrideResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::DescribeHoursOfOperationOverrideResponse#hours_of_operation_override #hours_of_operation_override} => Types::HoursOfOperationOverride + # + # @example Request syntax with placeholder values + # + # resp = client.describe_hours_of_operation_override({ + # instance_id: "InstanceId", # required + # hours_of_operation_id: "HoursOfOperationId", # required + # hours_of_operation_override_id: "HoursOfOperationOverrideId", # required + # }) + # + # @example Response structure + # + # resp.hours_of_operation_override.hours_of_operation_override_id #=> String + # resp.hours_of_operation_override.hours_of_operation_id #=> String + # resp.hours_of_operation_override.hours_of_operation_arn #=> String + # resp.hours_of_operation_override.name #=> String + # resp.hours_of_operation_override.description #=> String + # resp.hours_of_operation_override.config #=> Array + # resp.hours_of_operation_override.config[0].day #=> String, one of "SUNDAY", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY" + # resp.hours_of_operation_override.config[0].start_time.hours #=> Integer + # resp.hours_of_operation_override.config[0].start_time.minutes #=> Integer + # resp.hours_of_operation_override.config[0].end_time.hours #=> Integer + # resp.hours_of_operation_override.config[0].end_time.minutes #=> Integer + # resp.hours_of_operation_override.effective_from #=> String + # resp.hours_of_operation_override.effective_till #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DescribeHoursOfOperationOverride AWS API Documentation + # + # @overload describe_hours_of_operation_override(params = {}) + # @param [Hash] params ({}) + def describe_hours_of_operation_override(params = {}, options = {}) + req = build_request(:describe_hours_of_operation_override, params) + req.send_request(options) + end + # This API is in preview release for Amazon Connect and is subject to # change. # @@ -7559,6 +7702,54 @@ def get_current_user_data(params = {}, options = {}) req.send_request(options) end + # Get the hours of operations with the effective override applied. + # + # @option params [required, String] :instance_id + # The identifier of the Amazon Connect instance. + # + # @option params [required, String] :hours_of_operation_id + # The identifier for the hours of operation. + # + # @option params [required, String] :from_date + # The Date from when the hours of operation are listed. + # + # @option params [required, String] :to_date + # The Date until when the hours of operation are listed. 
+ # + # @return [Types::GetEffectiveHoursOfOperationsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::GetEffectiveHoursOfOperationsResponse#effective_hours_of_operation_list #effective_hours_of_operation_list} => Array<Types::EffectiveHoursOfOperations> + # * {Types::GetEffectiveHoursOfOperationsResponse#time_zone #time_zone} => String + # + # @example Request syntax with placeholder values + # + # resp = client.get_effective_hours_of_operations({ + # instance_id: "InstanceId", # required + # hours_of_operation_id: "HoursOfOperationId", # required + # from_date: "HoursOfOperationOverrideYearMonthDayDateFormat", # required + # to_date: "HoursOfOperationOverrideYearMonthDayDateFormat", # required + # }) + # + # @example Response structure + # + # resp.effective_hours_of_operation_list #=> Array + # resp.effective_hours_of_operation_list[0].date #=> String + # resp.effective_hours_of_operation_list[0].operational_hours #=> Array + # resp.effective_hours_of_operation_list[0].operational_hours[0].start.hours #=> Integer + # resp.effective_hours_of_operation_list[0].operational_hours[0].start.minutes #=> Integer + # resp.effective_hours_of_operation_list[0].operational_hours[0].end.hours #=> Integer + # resp.effective_hours_of_operation_list[0].operational_hours[0].end.minutes #=> Integer + # resp.time_zone #=> String + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/GetEffectiveHoursOfOperations AWS API Documentation + # + # @overload get_effective_hours_of_operations(params = {}) + # @param [Hash] params ({}) + def get_effective_hours_of_operations(params = {}, options = {}) + req = build_request(:get_effective_hours_of_operations, params) + req.send_request(options) + end + # Supports SAML sign-in for Amazon Connect. Retrieves a token for # federation. The token is for the Amazon Connect user which corresponds # to the IAM credentials that were used to invoke this action. @@ -10703,6 +10894,71 @@ def list_flow_associations(params = {}, options = {}) req.send_request(options) end + # List the hours of operation overrides. + # + # @option params [required, String] :instance_id + # The identifier of the Amazon Connect instance. + # + # @option params [required, String] :hours_of_operation_id + # The identifier for the hours of operation + # + # @option params [String] :next_token + # The token for the next set of results. Use the value returned in the + # previous response in the next request to retrieve the next set of + # results. + # + # @option params [Integer] :max_results + # The maximum number of results to return per page. The default + # MaxResult size is 100. Valid Range: Minimum value of 1. Maximum value + # of 1000. + # + # @return [Types::ListHoursOfOperationOverridesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::ListHoursOfOperationOverridesResponse#next_token #next_token} => String + # * {Types::ListHoursOfOperationOverridesResponse#hours_of_operation_override_list #hours_of_operation_override_list} => Array<Types::HoursOfOperationOverride> + # * {Types::ListHoursOfOperationOverridesResponse#last_modified_region #last_modified_region} => String + # * {Types::ListHoursOfOperationOverridesResponse#last_modified_time #last_modified_time} => Time + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. 
+ # + # @example Request syntax with placeholder values + # + # resp = client.list_hours_of_operation_overrides({ + # instance_id: "InstanceId", # required + # hours_of_operation_id: "HoursOfOperationId", # required + # next_token: "NextToken", + # max_results: 1, + # }) + # + # @example Response structure + # + # resp.next_token #=> String + # resp.hours_of_operation_override_list #=> Array + # resp.hours_of_operation_override_list[0].hours_of_operation_override_id #=> String + # resp.hours_of_operation_override_list[0].hours_of_operation_id #=> String + # resp.hours_of_operation_override_list[0].hours_of_operation_arn #=> String + # resp.hours_of_operation_override_list[0].name #=> String + # resp.hours_of_operation_override_list[0].description #=> String + # resp.hours_of_operation_override_list[0].config #=> Array + # resp.hours_of_operation_override_list[0].config[0].day #=> String, one of "SUNDAY", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY" + # resp.hours_of_operation_override_list[0].config[0].start_time.hours #=> Integer + # resp.hours_of_operation_override_list[0].config[0].start_time.minutes #=> Integer + # resp.hours_of_operation_override_list[0].config[0].end_time.hours #=> Integer + # resp.hours_of_operation_override_list[0].config[0].end_time.minutes #=> Integer + # resp.hours_of_operation_override_list[0].effective_from #=> String + # resp.hours_of_operation_override_list[0].effective_till #=> String + # resp.last_modified_region #=> String + # resp.last_modified_time #=> Time + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/ListHoursOfOperationOverrides AWS API Documentation + # + # @overload list_hours_of_operation_overrides(params = {}) + # @param [Hash] params ({}) + def list_hours_of_operation_overrides(params = {}, options = {}) + req = build_request(:list_hours_of_operation_overrides, params) + req.send_request(options) + end + # Provides information about the hours of operation for the specified # Amazon Connect instance. # @@ -13310,6 +13566,8 @@ def search_available_phone_numbers(params = {}, options = {}) # value: "String", # comparison_type: "STARTS_WITH", # accepts STARTS_WITH, CONTAINS, EXACT # }, + # state_condition: "ACTIVE", # accepts ACTIVE, ARCHIVED + # status_condition: "PUBLISHED", # accepts PUBLISHED, SAVED # }, # }) # @@ -13671,6 +13929,116 @@ def search_email_addresses(params = {}, options = {}) req.send_request(options) end + # Searches the hours of operation overrides. + # + # @option params [required, String] :instance_id + # The identifier of the Amazon Connect instance. + # + # @option params [String] :next_token + # The token for the next set of results. Use the value returned in the + # previous response in the next request to retrieve the next set of + # results. Length Constraints: Minimum length of 1. Maximum length of + # 2500. + # + # @option params [Integer] :max_results + # The maximum number of results to return per page. Valid Range: Minimum + # value of 1. Maximum value of 100. + # + # @option params [Types::HoursOfOperationSearchFilter] :search_filter + # Filters to be applied to search results. + # + # @option params [Types::HoursOfOperationOverrideSearchCriteria] :search_criteria + # The search criteria to be used to return hours of operations + # overrides. 
+ # + # @return [Types::SearchHoursOfOperationOverridesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: + # + # * {Types::SearchHoursOfOperationOverridesResponse#hours_of_operation_overrides #hours_of_operation_overrides} => Array<Types::HoursOfOperationOverride> + # * {Types::SearchHoursOfOperationOverridesResponse#next_token #next_token} => String + # * {Types::SearchHoursOfOperationOverridesResponse#approximate_total_count #approximate_total_count} => Integer + # + # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}. + # + # @example Request syntax with placeholder values + # + # resp = client.search_hours_of_operation_overrides({ + # instance_id: "InstanceId", # required + # next_token: "NextToken2500", + # max_results: 1, + # search_filter: { + # tag_filter: { + # or_conditions: [ + # [ + # { + # tag_key: "String", + # tag_value: "String", + # }, + # ], + # ], + # and_conditions: [ + # { + # tag_key: "String", + # tag_value: "String", + # }, + # ], + # tag_condition: { + # tag_key: "String", + # tag_value: "String", + # }, + # }, + # }, + # search_criteria: { + # or_conditions: [ + # { + # # recursive HoursOfOperationOverrideSearchCriteria + # }, + # ], + # and_conditions: [ + # { + # # recursive HoursOfOperationOverrideSearchCriteria + # }, + # ], + # string_condition: { + # field_name: "String", + # value: "String", + # comparison_type: "STARTS_WITH", # accepts STARTS_WITH, CONTAINS, EXACT + # }, + # date_condition: { + # field_name: "String", + # value: "DateYearMonthDayFormat", + # comparison_type: "GREATER_THAN", # accepts GREATER_THAN, LESS_THAN, GREATER_THAN_OR_EQUAL_TO, LESS_THAN_OR_EQUAL_TO, EQUAL_TO + # }, + # }, + # }) + # + # @example Response structure + # + # resp.hours_of_operation_overrides #=> Array + # resp.hours_of_operation_overrides[0].hours_of_operation_override_id #=> String + # resp.hours_of_operation_overrides[0].hours_of_operation_id #=> String + # resp.hours_of_operation_overrides[0].hours_of_operation_arn #=> String + # resp.hours_of_operation_overrides[0].name #=> String + # resp.hours_of_operation_overrides[0].description #=> String + # resp.hours_of_operation_overrides[0].config #=> Array + # resp.hours_of_operation_overrides[0].config[0].day #=> String, one of "SUNDAY", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY" + # resp.hours_of_operation_overrides[0].config[0].start_time.hours #=> Integer + # resp.hours_of_operation_overrides[0].config[0].start_time.minutes #=> Integer + # resp.hours_of_operation_overrides[0].config[0].end_time.hours #=> Integer + # resp.hours_of_operation_overrides[0].config[0].end_time.minutes #=> Integer + # resp.hours_of_operation_overrides[0].effective_from #=> String + # resp.hours_of_operation_overrides[0].effective_till #=> String + # resp.next_token #=> String + # resp.approximate_total_count #=> Integer + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/SearchHoursOfOperationOverrides AWS API Documentation + # + # @overload search_hours_of_operation_overrides(params = {}) + # @param [Hash] params ({}) + def search_hours_of_operation_overrides(params = {}, options = {}) + req = build_request(:search_hours_of_operation_overrides, params) + req.send_request(options) + end + # Searches the hours of operation in an Amazon Connect instance, with # optional filtering. 
# @@ -18217,6 +18585,69 @@ def update_hours_of_operation(params = {}, options = {}) req.send_request(options) end + # Update the hours of operation override. + # + # @option params [required, String] :instance_id + # The identifier of the Amazon Connect instance. + # + # @option params [required, String] :hours_of_operation_id + # The identifier for the hours of operation. + # + # @option params [required, String] :hours_of_operation_override_id + # The identifier for the hours of operation override. + # + # @option params [String] :name + # The name of the hours of operation override. + # + # @option params [String] :description + # The description of the hours of operation override. + # + # @option params [Array] :config + # Configuration information for the hours of operation override: day, + # start time, and end time. + # + # @option params [String] :effective_from + # The date from when the hours of operation override would be effective. + # + # @option params [String] :effective_till + # The date till when the hours of operation override would be effective. + # + # @return [Struct] Returns an empty {Seahorse::Client::Response response}. + # + # @example Request syntax with placeholder values + # + # resp = client.update_hours_of_operation_override({ + # instance_id: "InstanceId", # required + # hours_of_operation_id: "HoursOfOperationId", # required + # hours_of_operation_override_id: "HoursOfOperationOverrideId", # required + # name: "CommonHumanReadableName", + # description: "CommonHumanReadableDescription", + # config: [ + # { + # day: "SUNDAY", # accepts SUNDAY, MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY + # start_time: { + # hours: 1, # required + # minutes: 1, # required + # }, + # end_time: { + # hours: 1, # required + # minutes: 1, # required + # }, + # }, + # ], + # effective_from: "HoursOfOperationOverrideYearMonthDayDateFormat", + # effective_till: "HoursOfOperationOverrideYearMonthDayDateFormat", + # }) + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/UpdateHoursOfOperationOverride AWS API Documentation + # + # @overload update_hours_of_operation_override(params = {}) + # @param [Hash] params ({}) + def update_hours_of_operation_override(params = {}, options = {}) + req = build_request(:update_hours_of_operation_override, params) + req.send_request(options) + end + # This API is in preview release for Amazon Connect and is subject to # change. 
# @@ -20069,7 +20500,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-connect' - context[:gem_version] = '1.189.0' + context[:gem_version] = '1.190.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-connect/lib/aws-sdk-connect/client_api.rb b/gems/aws-sdk-connect/lib/aws-sdk-connect/client_api.rb index 22d00a06c6e..79a39e8c1bb 100644 --- a/gems/aws-sdk-connect/lib/aws-sdk-connect/client_api.rb +++ b/gems/aws-sdk-connect/lib/aws-sdk-connect/client_api.rb @@ -165,6 +165,8 @@ module ClientApi ClientToken = Shapes::StringShape.new(name: 'ClientToken') CommonAttributeAndCondition = Shapes::StructureShape.new(name: 'CommonAttributeAndCondition') CommonAttributeOrConditionList = Shapes::ListShape.new(name: 'CommonAttributeOrConditionList') + CommonHumanReadableDescription = Shapes::StringShape.new(name: 'CommonHumanReadableDescription') + CommonHumanReadableName = Shapes::StringShape.new(name: 'CommonHumanReadableName') CommonNameLength127 = Shapes::StringShape.new(name: 'CommonNameLength127') Comparison = Shapes::StringShape.new(name: 'Comparison') ComparisonOperator = Shapes::StringShape.new(name: 'ComparisonOperator') @@ -250,6 +252,8 @@ module ClientApi CreateEmailAddressResponse = Shapes::StructureShape.new(name: 'CreateEmailAddressResponse') CreateEvaluationFormRequest = Shapes::StructureShape.new(name: 'CreateEvaluationFormRequest') CreateEvaluationFormResponse = Shapes::StructureShape.new(name: 'CreateEvaluationFormResponse') + CreateHoursOfOperationOverrideRequest = Shapes::StructureShape.new(name: 'CreateHoursOfOperationOverrideRequest') + CreateHoursOfOperationOverrideResponse = Shapes::StructureShape.new(name: 'CreateHoursOfOperationOverrideResponse') CreateHoursOfOperationRequest = Shapes::StructureShape.new(name: 'CreateHoursOfOperationRequest') CreateHoursOfOperationResponse = Shapes::StructureShape.new(name: 'CreateHoursOfOperationResponse') CreateInstanceRequest = Shapes::StructureShape.new(name: 'CreateInstanceRequest') @@ -310,7 +314,10 @@ module ClientApi CustomerVoiceActivity = Shapes::StructureShape.new(name: 'CustomerVoiceActivity') DataSetId = Shapes::StringShape.new(name: 'DataSetId') DataSetIds = Shapes::ListShape.new(name: 'DataSetIds') + DateComparisonType = Shapes::StringShape.new(name: 'DateComparisonType') + DateCondition = Shapes::StructureShape.new(name: 'DateCondition') DateReference = Shapes::StructureShape.new(name: 'DateReference') + DateYearMonthDayFormat = Shapes::StringShape.new(name: 'DateYearMonthDayFormat') DeactivateEvaluationFormRequest = Shapes::StructureShape.new(name: 'DeactivateEvaluationFormRequest') DeactivateEvaluationFormResponse = Shapes::StructureShape.new(name: 'DeactivateEvaluationFormResponse') DefaultVocabulary = Shapes::StructureShape.new(name: 'DefaultVocabulary') @@ -326,6 +333,7 @@ module ClientApi DeleteEmailAddressRequest = Shapes::StructureShape.new(name: 'DeleteEmailAddressRequest') DeleteEmailAddressResponse = Shapes::StructureShape.new(name: 'DeleteEmailAddressResponse') DeleteEvaluationFormRequest = Shapes::StructureShape.new(name: 'DeleteEvaluationFormRequest') + DeleteHoursOfOperationOverrideRequest = Shapes::StructureShape.new(name: 'DeleteHoursOfOperationOverrideRequest') DeleteHoursOfOperationRequest = Shapes::StructureShape.new(name: 'DeleteHoursOfOperationRequest') DeleteInstanceRequest = Shapes::StructureShape.new(name: 'DeleteInstanceRequest') DeleteIntegrationAssociationRequest = Shapes::StructureShape.new(name: 
'DeleteIntegrationAssociationRequest') @@ -367,6 +375,8 @@ module ClientApi DescribeEmailAddressResponse = Shapes::StructureShape.new(name: 'DescribeEmailAddressResponse') DescribeEvaluationFormRequest = Shapes::StructureShape.new(name: 'DescribeEvaluationFormRequest') DescribeEvaluationFormResponse = Shapes::StructureShape.new(name: 'DescribeEvaluationFormResponse') + DescribeHoursOfOperationOverrideRequest = Shapes::StructureShape.new(name: 'DescribeHoursOfOperationOverrideRequest') + DescribeHoursOfOperationOverrideResponse = Shapes::StructureShape.new(name: 'DescribeHoursOfOperationOverrideResponse') DescribeHoursOfOperationRequest = Shapes::StructureShape.new(name: 'DescribeHoursOfOperationRequest') DescribeHoursOfOperationResponse = Shapes::StructureShape.new(name: 'DescribeHoursOfOperationResponse') DescribeInstanceAttributeRequest = Shapes::StructureShape.new(name: 'DescribeInstanceAttributeRequest') @@ -445,6 +455,8 @@ module ClientApi DownloadUrlMetadata = Shapes::StructureShape.new(name: 'DownloadUrlMetadata') DuplicateResourceException = Shapes::StructureShape.new(name: 'DuplicateResourceException') DurationInSeconds = Shapes::IntegerShape.new(name: 'DurationInSeconds') + EffectiveHoursOfOperationList = Shapes::ListShape.new(name: 'EffectiveHoursOfOperationList') + EffectiveHoursOfOperations = Shapes::StructureShape.new(name: 'EffectiveHoursOfOperations') Email = Shapes::StringShape.new(name: 'Email') EmailAddress = Shapes::StringShape.new(name: 'EmailAddress') EmailAddressArn = Shapes::StringShape.new(name: 'EmailAddressArn') @@ -574,6 +586,8 @@ module ClientApi GetCurrentMetricDataResponse = Shapes::StructureShape.new(name: 'GetCurrentMetricDataResponse') GetCurrentUserDataRequest = Shapes::StructureShape.new(name: 'GetCurrentUserDataRequest') GetCurrentUserDataResponse = Shapes::StructureShape.new(name: 'GetCurrentUserDataResponse') + GetEffectiveHoursOfOperationsRequest = Shapes::StructureShape.new(name: 'GetEffectiveHoursOfOperationsRequest') + GetEffectiveHoursOfOperationsResponse = Shapes::StructureShape.new(name: 'GetEffectiveHoursOfOperationsResponse') GetFederationTokenRequest = Shapes::StructureShape.new(name: 'GetFederationTokenRequest') GetFederationTokenResponse = Shapes::StructureShape.new(name: 'GetFederationTokenResponse') GetFlowAssociationRequest = Shapes::StructureShape.new(name: 'GetFlowAssociationRequest') @@ -630,6 +644,14 @@ module ClientApi HoursOfOperationId = Shapes::StringShape.new(name: 'HoursOfOperationId') HoursOfOperationList = Shapes::ListShape.new(name: 'HoursOfOperationList') HoursOfOperationName = Shapes::StringShape.new(name: 'HoursOfOperationName') + HoursOfOperationOverride = Shapes::StructureShape.new(name: 'HoursOfOperationOverride') + HoursOfOperationOverrideConfig = Shapes::StructureShape.new(name: 'HoursOfOperationOverrideConfig') + HoursOfOperationOverrideConfigList = Shapes::ListShape.new(name: 'HoursOfOperationOverrideConfigList') + HoursOfOperationOverrideId = Shapes::StringShape.new(name: 'HoursOfOperationOverrideId') + HoursOfOperationOverrideList = Shapes::ListShape.new(name: 'HoursOfOperationOverrideList') + HoursOfOperationOverrideSearchConditionList = Shapes::ListShape.new(name: 'HoursOfOperationOverrideSearchConditionList') + HoursOfOperationOverrideSearchCriteria = Shapes::StructureShape.new(name: 'HoursOfOperationOverrideSearchCriteria') + HoursOfOperationOverrideYearMonthDayDateFormat = Shapes::StringShape.new(name: 'HoursOfOperationOverrideYearMonthDayDateFormat') HoursOfOperationSearchConditionList = 
Shapes::ListShape.new(name: 'HoursOfOperationSearchConditionList') HoursOfOperationSearchCriteria = Shapes::StructureShape.new(name: 'HoursOfOperationSearchCriteria') HoursOfOperationSearchFilter = Shapes::StructureShape.new(name: 'HoursOfOperationSearchFilter') @@ -730,6 +752,8 @@ module ClientApi ListFlowAssociationResourceType = Shapes::StringShape.new(name: 'ListFlowAssociationResourceType') ListFlowAssociationsRequest = Shapes::StructureShape.new(name: 'ListFlowAssociationsRequest') ListFlowAssociationsResponse = Shapes::StructureShape.new(name: 'ListFlowAssociationsResponse') + ListHoursOfOperationOverridesRequest = Shapes::StructureShape.new(name: 'ListHoursOfOperationOverridesRequest') + ListHoursOfOperationOverridesResponse = Shapes::StructureShape.new(name: 'ListHoursOfOperationOverridesResponse') ListHoursOfOperationsRequest = Shapes::StructureShape.new(name: 'ListHoursOfOperationsRequest') ListHoursOfOperationsResponse = Shapes::StructureShape.new(name: 'ListHoursOfOperationsResponse') ListInstanceAttributesRequest = Shapes::StructureShape.new(name: 'ListInstanceAttributesRequest') @@ -853,6 +877,8 @@ module ClientApi NumericQuestionPropertyAutomationLabel = Shapes::StringShape.new(name: 'NumericQuestionPropertyAutomationLabel') NumericQuestionPropertyValueAutomation = Shapes::StructureShape.new(name: 'NumericQuestionPropertyValueAutomation') OperatingSystem = Shapes::StringShape.new(name: 'OperatingSystem') + OperationalHour = Shapes::StructureShape.new(name: 'OperationalHour') + OperationalHours = Shapes::ListShape.new(name: 'OperationalHours') Origin = Shapes::StringShape.new(name: 'Origin') OriginsList = Shapes::ListShape.new(name: 'OriginsList') OutboundAdditionalRecipients = Shapes::StructureShape.new(name: 'OutboundAdditionalRecipients') @@ -867,6 +893,8 @@ module ClientApi OutboundRequestId = Shapes::StringShape.new(name: 'OutboundRequestId') OutboundSubject = Shapes::StringShape.new(name: 'OutboundSubject') OutputTypeNotFoundException = Shapes::StructureShape.new(name: 'OutputTypeNotFoundException') + OverrideDays = Shapes::StringShape.new(name: 'OverrideDays') + OverrideTimeSlice = Shapes::StructureShape.new(name: 'OverrideTimeSlice') PEM = Shapes::StringShape.new(name: 'PEM') ParticipantCapabilities = Shapes::StructureShape.new(name: 'ParticipantCapabilities') ParticipantDetails = Shapes::StructureShape.new(name: 'ParticipantDetails') @@ -1116,6 +1144,8 @@ module ClientApi SearchCriteria = Shapes::StructureShape.new(name: 'SearchCriteria') SearchEmailAddressesRequest = Shapes::StructureShape.new(name: 'SearchEmailAddressesRequest') SearchEmailAddressesResponse = Shapes::StructureShape.new(name: 'SearchEmailAddressesResponse') + SearchHoursOfOperationOverridesRequest = Shapes::StructureShape.new(name: 'SearchHoursOfOperationOverridesRequest') + SearchHoursOfOperationOverridesResponse = Shapes::StructureShape.new(name: 'SearchHoursOfOperationOverridesResponse') SearchHoursOfOperationsRequest = Shapes::StructureShape.new(name: 'SearchHoursOfOperationsRequest') SearchHoursOfOperationsResponse = Shapes::StructureShape.new(name: 'SearchHoursOfOperationsResponse') SearchPredefinedAttributesRequest = Shapes::StructureShape.new(name: 'SearchPredefinedAttributesRequest') @@ -1355,6 +1385,7 @@ module ClientApi UpdateEvaluationFormRequest = Shapes::StructureShape.new(name: 'UpdateEvaluationFormRequest') UpdateEvaluationFormResponse = Shapes::StructureShape.new(name: 'UpdateEvaluationFormResponse') UpdateHoursOfOperationDescription = Shapes::StringShape.new(name: 
'UpdateHoursOfOperationDescription') + UpdateHoursOfOperationOverrideRequest = Shapes::StructureShape.new(name: 'UpdateHoursOfOperationOverrideRequest') UpdateHoursOfOperationRequest = Shapes::StructureShape.new(name: 'UpdateHoursOfOperationRequest') UpdateInstanceAttributeRequest = Shapes::StructureShape.new(name: 'UpdateInstanceAttributeRequest') UpdateInstanceStorageConfigRequest = Shapes::StructureShape.new(name: 'UpdateInstanceStorageConfigRequest') @@ -2021,6 +2052,8 @@ module ClientApi ContactFlowModuleSearchCriteria.add_member(:or_conditions, Shapes::ShapeRef.new(shape: ContactFlowModuleSearchConditionList, location_name: "OrConditions")) ContactFlowModuleSearchCriteria.add_member(:and_conditions, Shapes::ShapeRef.new(shape: ContactFlowModuleSearchConditionList, location_name: "AndConditions")) ContactFlowModuleSearchCriteria.add_member(:string_condition, Shapes::ShapeRef.new(shape: StringCondition, location_name: "StringCondition")) + ContactFlowModuleSearchCriteria.add_member(:state_condition, Shapes::ShapeRef.new(shape: ContactFlowModuleState, location_name: "StateCondition")) + ContactFlowModuleSearchCriteria.add_member(:status_condition, Shapes::ShapeRef.new(shape: ContactFlowModuleStatus, location_name: "StatusCondition")) ContactFlowModuleSearchCriteria.struct_class = Types::ContactFlowModuleSearchCriteria ContactFlowModuleSearchFilter.add_member(:tag_filter, Shapes::ShapeRef.new(shape: ControlPlaneTagFilter, location_name: "TagFilter")) @@ -2229,6 +2262,18 @@ module ClientApi CreateEvaluationFormResponse.add_member(:evaluation_form_arn, Shapes::ShapeRef.new(shape: ARN, required: true, location_name: "EvaluationFormArn")) CreateEvaluationFormResponse.struct_class = Types::CreateEvaluationFormResponse + CreateHoursOfOperationOverrideRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) + CreateHoursOfOperationOverrideRequest.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) + CreateHoursOfOperationOverrideRequest.add_member(:name, Shapes::ShapeRef.new(shape: CommonHumanReadableName, required: true, location_name: "Name")) + CreateHoursOfOperationOverrideRequest.add_member(:description, Shapes::ShapeRef.new(shape: CommonHumanReadableDescription, location_name: "Description")) + CreateHoursOfOperationOverrideRequest.add_member(:config, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideConfigList, required: true, location_name: "Config")) + CreateHoursOfOperationOverrideRequest.add_member(:effective_from, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideYearMonthDayDateFormat, required: true, location_name: "EffectiveFrom")) + CreateHoursOfOperationOverrideRequest.add_member(:effective_till, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideYearMonthDayDateFormat, required: true, location_name: "EffectiveTill")) + CreateHoursOfOperationOverrideRequest.struct_class = Types::CreateHoursOfOperationOverrideRequest + + CreateHoursOfOperationOverrideResponse.add_member(:hours_of_operation_override_id, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideId, location_name: "HoursOfOperationOverrideId")) + CreateHoursOfOperationOverrideResponse.struct_class = Types::CreateHoursOfOperationOverrideResponse + CreateHoursOfOperationRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) 
CreateHoursOfOperationRequest.add_member(:name, Shapes::ShapeRef.new(shape: CommonNameLength127, required: true, location_name: "Name")) CreateHoursOfOperationRequest.add_member(:description, Shapes::ShapeRef.new(shape: HoursOfOperationDescription, location_name: "Description")) @@ -2534,6 +2579,11 @@ module ClientApi DataSetIds.member = Shapes::ShapeRef.new(shape: DataSetId) + DateCondition.add_member(:field_name, Shapes::ShapeRef.new(shape: String, location_name: "FieldName")) + DateCondition.add_member(:value, Shapes::ShapeRef.new(shape: DateYearMonthDayFormat, location_name: "Value")) + DateCondition.add_member(:comparison_type, Shapes::ShapeRef.new(shape: DateComparisonType, location_name: "ComparisonType")) + DateCondition.struct_class = Types::DateCondition + DateReference.add_member(:name, Shapes::ShapeRef.new(shape: ReferenceKey, location_name: "Name")) DateReference.add_member(:value, Shapes::ShapeRef.new(shape: ReferenceValue, location_name: "Value")) DateReference.struct_class = Types::DateReference @@ -2590,6 +2640,11 @@ module ClientApi DeleteEvaluationFormRequest.add_member(:evaluation_form_version, Shapes::ShapeRef.new(shape: VersionNumber, location: "querystring", location_name: "version", metadata: {"box"=>true})) DeleteEvaluationFormRequest.struct_class = Types::DeleteEvaluationFormRequest + DeleteHoursOfOperationOverrideRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) + DeleteHoursOfOperationOverrideRequest.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) + DeleteHoursOfOperationOverrideRequest.add_member(:hours_of_operation_override_id, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideId, required: true, location: "uri", location_name: "HoursOfOperationOverrideId")) + DeleteHoursOfOperationOverrideRequest.struct_class = Types::DeleteHoursOfOperationOverrideRequest + DeleteHoursOfOperationRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) DeleteHoursOfOperationRequest.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) DeleteHoursOfOperationRequest.struct_class = Types::DeleteHoursOfOperationRequest @@ -2747,6 +2802,14 @@ module ClientApi DescribeEvaluationFormResponse.add_member(:evaluation_form, Shapes::ShapeRef.new(shape: EvaluationForm, required: true, location_name: "EvaluationForm")) DescribeEvaluationFormResponse.struct_class = Types::DescribeEvaluationFormResponse + DescribeHoursOfOperationOverrideRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) + DescribeHoursOfOperationOverrideRequest.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) + DescribeHoursOfOperationOverrideRequest.add_member(:hours_of_operation_override_id, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideId, required: true, location: "uri", location_name: "HoursOfOperationOverrideId")) + DescribeHoursOfOperationOverrideRequest.struct_class = Types::DescribeHoursOfOperationOverrideRequest + + DescribeHoursOfOperationOverrideResponse.add_member(:hours_of_operation_override, Shapes::ShapeRef.new(shape: HoursOfOperationOverride, 
location_name: "HoursOfOperationOverride")) + DescribeHoursOfOperationOverrideResponse.struct_class = Types::DescribeHoursOfOperationOverrideResponse + DescribeHoursOfOperationRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) DescribeHoursOfOperationRequest.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) DescribeHoursOfOperationRequest.struct_class = Types::DescribeHoursOfOperationRequest @@ -2979,6 +3042,12 @@ module ClientApi DuplicateResourceException.add_member(:message, Shapes::ShapeRef.new(shape: Message, location_name: "Message")) DuplicateResourceException.struct_class = Types::DuplicateResourceException + EffectiveHoursOfOperationList.member = Shapes::ShapeRef.new(shape: EffectiveHoursOfOperations) + + EffectiveHoursOfOperations.add_member(:date, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideYearMonthDayDateFormat, location_name: "Date")) + EffectiveHoursOfOperations.add_member(:operational_hours, Shapes::ShapeRef.new(shape: OperationalHours, location_name: "OperationalHours")) + EffectiveHoursOfOperations.struct_class = Types::EffectiveHoursOfOperations + EmailAddressInfo.add_member(:email_address, Shapes::ShapeRef.new(shape: EmailAddress, required: true, location_name: "EmailAddress")) EmailAddressInfo.add_member(:display_name, Shapes::ShapeRef.new(shape: EmailAddressDisplayName, location_name: "DisplayName")) EmailAddressInfo.struct_class = Types::EmailAddressInfo @@ -3364,6 +3433,16 @@ module ClientApi GetCurrentUserDataResponse.add_member(:approximate_total_count, Shapes::ShapeRef.new(shape: ApproximateTotalCount, location_name: "ApproximateTotalCount")) GetCurrentUserDataResponse.struct_class = Types::GetCurrentUserDataResponse + GetEffectiveHoursOfOperationsRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) + GetEffectiveHoursOfOperationsRequest.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) + GetEffectiveHoursOfOperationsRequest.add_member(:from_date, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideYearMonthDayDateFormat, required: true, location: "querystring", location_name: "fromDate")) + GetEffectiveHoursOfOperationsRequest.add_member(:to_date, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideYearMonthDayDateFormat, required: true, location: "querystring", location_name: "toDate")) + GetEffectiveHoursOfOperationsRequest.struct_class = Types::GetEffectiveHoursOfOperationsRequest + + GetEffectiveHoursOfOperationsResponse.add_member(:effective_hours_of_operation_list, Shapes::ShapeRef.new(shape: EffectiveHoursOfOperationList, location_name: "EffectiveHoursOfOperationList")) + GetEffectiveHoursOfOperationsResponse.add_member(:time_zone, Shapes::ShapeRef.new(shape: TimeZone, location_name: "TimeZone")) + GetEffectiveHoursOfOperationsResponse.struct_class = Types::GetEffectiveHoursOfOperationsResponse + GetFederationTokenRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) GetFederationTokenRequest.struct_class = Types::GetFederationTokenRequest @@ -3572,6 +3651,33 @@ module ClientApi HoursOfOperationList.member = Shapes::ShapeRef.new(shape: HoursOfOperation) + 
HoursOfOperationOverride.add_member(:hours_of_operation_override_id, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideId, location_name: "HoursOfOperationOverrideId")) + HoursOfOperationOverride.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, location_name: "HoursOfOperationId")) + HoursOfOperationOverride.add_member(:hours_of_operation_arn, Shapes::ShapeRef.new(shape: ARN, location_name: "HoursOfOperationArn")) + HoursOfOperationOverride.add_member(:name, Shapes::ShapeRef.new(shape: CommonHumanReadableName, location_name: "Name")) + HoursOfOperationOverride.add_member(:description, Shapes::ShapeRef.new(shape: CommonHumanReadableDescription, location_name: "Description")) + HoursOfOperationOverride.add_member(:config, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideConfigList, location_name: "Config")) + HoursOfOperationOverride.add_member(:effective_from, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideYearMonthDayDateFormat, location_name: "EffectiveFrom")) + HoursOfOperationOverride.add_member(:effective_till, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideYearMonthDayDateFormat, location_name: "EffectiveTill")) + HoursOfOperationOverride.struct_class = Types::HoursOfOperationOverride + + HoursOfOperationOverrideConfig.add_member(:day, Shapes::ShapeRef.new(shape: OverrideDays, location_name: "Day")) + HoursOfOperationOverrideConfig.add_member(:start_time, Shapes::ShapeRef.new(shape: OverrideTimeSlice, location_name: "StartTime")) + HoursOfOperationOverrideConfig.add_member(:end_time, Shapes::ShapeRef.new(shape: OverrideTimeSlice, location_name: "EndTime")) + HoursOfOperationOverrideConfig.struct_class = Types::HoursOfOperationOverrideConfig + + HoursOfOperationOverrideConfigList.member = Shapes::ShapeRef.new(shape: HoursOfOperationOverrideConfig) + + HoursOfOperationOverrideList.member = Shapes::ShapeRef.new(shape: HoursOfOperationOverride) + + HoursOfOperationOverrideSearchConditionList.member = Shapes::ShapeRef.new(shape: HoursOfOperationOverrideSearchCriteria) + + HoursOfOperationOverrideSearchCriteria.add_member(:or_conditions, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideSearchConditionList, location_name: "OrConditions")) + HoursOfOperationOverrideSearchCriteria.add_member(:and_conditions, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideSearchConditionList, location_name: "AndConditions")) + HoursOfOperationOverrideSearchCriteria.add_member(:string_condition, Shapes::ShapeRef.new(shape: StringCondition, location_name: "StringCondition")) + HoursOfOperationOverrideSearchCriteria.add_member(:date_condition, Shapes::ShapeRef.new(shape: DateCondition, location_name: "DateCondition")) + HoursOfOperationOverrideSearchCriteria.struct_class = Types::HoursOfOperationOverrideSearchCriteria + HoursOfOperationSearchConditionList.member = Shapes::ShapeRef.new(shape: HoursOfOperationSearchCriteria) HoursOfOperationSearchCriteria.add_member(:or_conditions, Shapes::ShapeRef.new(shape: HoursOfOperationSearchConditionList, location_name: "OrConditions")) @@ -3890,6 +3996,18 @@ module ClientApi ListFlowAssociationsResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) ListFlowAssociationsResponse.struct_class = Types::ListFlowAssociationsResponse + ListHoursOfOperationOverridesRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) + ListHoursOfOperationOverridesRequest.add_member(:hours_of_operation_id, 
Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) + ListHoursOfOperationOverridesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken")) + ListHoursOfOperationOverridesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResult100, location: "querystring", location_name: "maxResults", metadata: {"box"=>true})) + ListHoursOfOperationOverridesRequest.struct_class = Types::ListHoursOfOperationOverridesRequest + + ListHoursOfOperationOverridesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location_name: "NextToken")) + ListHoursOfOperationOverridesResponse.add_member(:hours_of_operation_override_list, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideList, location_name: "HoursOfOperationOverrideList")) + ListHoursOfOperationOverridesResponse.add_member(:last_modified_region, Shapes::ShapeRef.new(shape: RegionName, location_name: "LastModifiedRegion")) + ListHoursOfOperationOverridesResponse.add_member(:last_modified_time, Shapes::ShapeRef.new(shape: Timestamp, location_name: "LastModifiedTime")) + ListHoursOfOperationOverridesResponse.struct_class = Types::ListHoursOfOperationOverridesResponse + ListHoursOfOperationsRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) ListHoursOfOperationsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken, location: "querystring", location_name: "nextToken")) ListHoursOfOperationsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResult1000, location: "querystring", location_name: "maxResults")) @@ -4322,6 +4440,12 @@ module ClientApi NumericQuestionPropertyValueAutomation.add_member(:label, Shapes::ShapeRef.new(shape: NumericQuestionPropertyAutomationLabel, required: true, location_name: "Label")) NumericQuestionPropertyValueAutomation.struct_class = Types::NumericQuestionPropertyValueAutomation + OperationalHour.add_member(:start, Shapes::ShapeRef.new(shape: OverrideTimeSlice, location_name: "Start")) + OperationalHour.add_member(:end, Shapes::ShapeRef.new(shape: OverrideTimeSlice, location_name: "End")) + OperationalHour.struct_class = Types::OperationalHour + + OperationalHours.member = Shapes::ShapeRef.new(shape: OperationalHour) + OriginsList.member = Shapes::ShapeRef.new(shape: Origin) OutboundAdditionalRecipients.add_member(:cc_email_addresses, Shapes::ShapeRef.new(shape: EmailAddressRecipientList, location_name: "CcEmailAddresses")) @@ -4351,6 +4475,10 @@ module ClientApi OutputTypeNotFoundException.add_member(:message, Shapes::ShapeRef.new(shape: Message, location_name: "Message")) OutputTypeNotFoundException.struct_class = Types::OutputTypeNotFoundException + OverrideTimeSlice.add_member(:hours, Shapes::ShapeRef.new(shape: Hours24Format, required: true, location_name: "Hours", metadata: {"box"=>true})) + OverrideTimeSlice.add_member(:minutes, Shapes::ShapeRef.new(shape: MinutesLimit60, required: true, location_name: "Minutes", metadata: {"box"=>true})) + OverrideTimeSlice.struct_class = Types::OverrideTimeSlice + ParticipantCapabilities.add_member(:video, Shapes::ShapeRef.new(shape: VideoCapability, location_name: "Video")) ParticipantCapabilities.add_member(:screen_share, Shapes::ShapeRef.new(shape: ScreenShareCapability, location_name: "ScreenShare")) ParticipantCapabilities.struct_class = Types::ParticipantCapabilities @@ -5034,6 +5162,18 @@ module ClientApi 
SearchEmailAddressesResponse.add_member(:approximate_total_count, Shapes::ShapeRef.new(shape: ApproximateTotalCount, location_name: "ApproximateTotalCount")) SearchEmailAddressesResponse.struct_class = Types::SearchEmailAddressesResponse + SearchHoursOfOperationOverridesRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location_name: "InstanceId")) + SearchHoursOfOperationOverridesRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken2500, location_name: "NextToken")) + SearchHoursOfOperationOverridesRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResult100, location_name: "MaxResults", metadata: {"box"=>true})) + SearchHoursOfOperationOverridesRequest.add_member(:search_filter, Shapes::ShapeRef.new(shape: HoursOfOperationSearchFilter, location_name: "SearchFilter")) + SearchHoursOfOperationOverridesRequest.add_member(:search_criteria, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideSearchCriteria, location_name: "SearchCriteria")) + SearchHoursOfOperationOverridesRequest.struct_class = Types::SearchHoursOfOperationOverridesRequest + + SearchHoursOfOperationOverridesResponse.add_member(:hours_of_operation_overrides, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideList, location_name: "HoursOfOperationOverrides")) + SearchHoursOfOperationOverridesResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken2500, location_name: "NextToken")) + SearchHoursOfOperationOverridesResponse.add_member(:approximate_total_count, Shapes::ShapeRef.new(shape: ApproximateTotalCount, location_name: "ApproximateTotalCount")) + SearchHoursOfOperationOverridesResponse.struct_class = Types::SearchHoursOfOperationOverridesResponse + SearchHoursOfOperationsRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location_name: "InstanceId")) SearchHoursOfOperationsRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: NextToken2500, location_name: "NextToken")) SearchHoursOfOperationsRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResult100, location_name: "MaxResults", metadata: {"box"=>true})) @@ -5860,6 +6000,16 @@ module ClientApi UpdateEvaluationFormResponse.add_member(:evaluation_form_version, Shapes::ShapeRef.new(shape: VersionNumber, required: true, location_name: "EvaluationFormVersion")) UpdateEvaluationFormResponse.struct_class = Types::UpdateEvaluationFormResponse + UpdateHoursOfOperationOverrideRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) + UpdateHoursOfOperationOverrideRequest.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) + UpdateHoursOfOperationOverrideRequest.add_member(:hours_of_operation_override_id, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideId, required: true, location: "uri", location_name: "HoursOfOperationOverrideId")) + UpdateHoursOfOperationOverrideRequest.add_member(:name, Shapes::ShapeRef.new(shape: CommonHumanReadableName, location_name: "Name")) + UpdateHoursOfOperationOverrideRequest.add_member(:description, Shapes::ShapeRef.new(shape: CommonHumanReadableDescription, location_name: "Description")) + UpdateHoursOfOperationOverrideRequest.add_member(:config, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideConfigList, location_name: "Config")) + UpdateHoursOfOperationOverrideRequest.add_member(:effective_from, Shapes::ShapeRef.new(shape: 
HoursOfOperationOverrideYearMonthDayDateFormat, location_name: "EffectiveFrom")) + UpdateHoursOfOperationOverrideRequest.add_member(:effective_till, Shapes::ShapeRef.new(shape: HoursOfOperationOverrideYearMonthDayDateFormat, location_name: "EffectiveTill")) + UpdateHoursOfOperationOverrideRequest.struct_class = Types::UpdateHoursOfOperationOverrideRequest + UpdateHoursOfOperationRequest.add_member(:instance_id, Shapes::ShapeRef.new(shape: InstanceId, required: true, location: "uri", location_name: "InstanceId")) UpdateHoursOfOperationRequest.add_member(:hours_of_operation_id, Shapes::ShapeRef.new(shape: HoursOfOperationId, required: true, location: "uri", location_name: "HoursOfOperationId")) UpdateHoursOfOperationRequest.add_member(:name, Shapes::ShapeRef.new(shape: CommonNameLength127, location_name: "Name")) @@ -6783,6 +6933,21 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) end) + api.add_operation(:create_hours_of_operation_override, Seahorse::Model::Operation.new.tap do |o| + o.name = "CreateHoursOfOperationOverride" + o.http_method = "PUT" + o.http_request_uri = "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides" + o.input = Shapes::ShapeRef.new(shape: CreateHoursOfOperationOverrideRequest) + o.output = Shapes::ShapeRef.new(shape: CreateHoursOfOperationOverrideResponse) + o.errors << Shapes::ShapeRef.new(shape: DuplicateResourceException) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: LimitExceededException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) + end) + api.add_operation(:create_instance, Seahorse::Model::Operation.new.tap do |o| o.name = "CreateInstance" o.http_method = "PUT" @@ -7183,6 +7348,19 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) end) + api.add_operation(:delete_hours_of_operation_override, Seahorse::Model::Operation.new.tap do |o| + o.name = "DeleteHoursOfOperationOverride" + o.http_method = "DELETE" + o.http_request_uri = "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + o.input = Shapes::ShapeRef.new(shape: DeleteHoursOfOperationOverrideRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) + end) + api.add_operation(:delete_instance, Seahorse::Model::Operation.new.tap do |o| o.name = "DeleteInstance" o.http_method = "DELETE" @@ -7542,6 +7720,19 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) end) + api.add_operation(:describe_hours_of_operation_override, Seahorse::Model::Operation.new.tap do |o| + o.name = "DescribeHoursOfOperationOverride" + o.http_method = "GET" + o.http_request_uri = "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + o.input = Shapes::ShapeRef.new(shape: DescribeHoursOfOperationOverrideRequest) + o.output = Shapes::ShapeRef.new(shape: 
DescribeHoursOfOperationOverrideResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) + end) + api.add_operation(:describe_instance, Seahorse::Model::Operation.new.tap do |o| o.name = "DescribeInstance" o.http_method = "GET" @@ -8008,6 +8199,19 @@ module ClientApi ) end) + api.add_operation(:get_effective_hours_of_operations, Seahorse::Model::Operation.new.tap do |o| + o.name = "GetEffectiveHoursOfOperations" + o.http_method = "GET" + o.http_request_uri = "/effective-hours-of-operations/{InstanceId}/{HoursOfOperationId}" + o.input = Shapes::ShapeRef.new(shape: GetEffectiveHoursOfOperationsRequest) + o.output = Shapes::ShapeRef.new(shape: GetEffectiveHoursOfOperationsResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) + end) + api.add_operation(:get_federation_token, Seahorse::Model::Operation.new.tap do |o| o.name = "GetFederationToken" o.http_method = "GET" @@ -8396,6 +8600,25 @@ module ClientApi ) end) + api.add_operation(:list_hours_of_operation_overrides, Seahorse::Model::Operation.new.tap do |o| + o.name = "ListHoursOfOperationOverrides" + o.http_method = "GET" + o.http_request_uri = "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides" + o.input = Shapes::ShapeRef.new(shape: ListHoursOfOperationOverridesRequest) + o.output = Shapes::ShapeRef.new(shape: ListHoursOfOperationOverridesResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) + end) + api.add_operation(:list_hours_of_operations, Seahorse::Model::Operation.new.tap do |o| o.name = "ListHoursOfOperations" o.http_method = "GET" @@ -9205,6 +9428,25 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) end) + api.add_operation(:search_hours_of_operation_overrides, Seahorse::Model::Operation.new.tap do |o| + o.name = "SearchHoursOfOperationOverrides" + o.http_method = "POST" + o.http_request_uri = "/search-hours-of-operation-overrides" + o.input = Shapes::ShapeRef.new(shape: SearchHoursOfOperationOverridesRequest) + o.output = Shapes::ShapeRef.new(shape: SearchHoursOfOperationOverridesResponse) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) + o[:pager] = Aws::Pager.new( + limit_key: "max_results", + tokens: { + "next_token" => "next_token" + } + ) + end) + api.add_operation(:search_hours_of_operations, Seahorse::Model::Operation.new.tap 
do |o| o.name = "SearchHoursOfOperations" o.http_method = "POST" @@ -9946,6 +10188,21 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) end) + api.add_operation(:update_hours_of_operation_override, Seahorse::Model::Operation.new.tap do |o| + o.name = "UpdateHoursOfOperationOverride" + o.http_method = "POST" + o.http_request_uri = "/hours-of-operations/{InstanceId}/{HoursOfOperationId}/overrides/{HoursOfOperationOverrideId}" + o.input = Shapes::ShapeRef.new(shape: UpdateHoursOfOperationOverrideRequest) + o.output = Shapes::ShapeRef.new(shape: Shapes::StructureShape.new(struct_class: Aws::EmptyStructure)) + o.errors << Shapes::ShapeRef.new(shape: DuplicateResourceException) + o.errors << Shapes::ShapeRef.new(shape: InvalidRequestException) + o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException) + o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException) + o.errors << Shapes::ShapeRef.new(shape: ThrottlingException) + o.errors << Shapes::ShapeRef.new(shape: InternalServiceException) + o.errors << Shapes::ShapeRef.new(shape: ConditionalOperationFailedException) + end) + api.add_operation(:update_instance_attribute, Seahorse::Model::Operation.new.tap do |o| o.name = "UpdateInstanceAttribute" o.http_method = "POST" diff --git a/gems/aws-sdk-connect/lib/aws-sdk-connect/types.rb b/gems/aws-sdk-connect/lib/aws-sdk-connect/types.rb index 7f018671557..ba80db2a2aa 100644 --- a/gems/aws-sdk-connect/lib/aws-sdk-connect/types.rb +++ b/gems/aws-sdk-connect/lib/aws-sdk-connect/types.rb @@ -2232,7 +2232,7 @@ class Condition < Struct.new( include Aws::Structure end - # A conditional check failed. + # Request processing failed because dependent condition failed. # # @!attribute [rw] message # @return [String] @@ -2532,6 +2532,11 @@ class ContactAnalysis < Struct.new( # # @!attribute [rw] participant_role # The role of the participant in the chat conversation. + # + # Only `CUSTOMER` is currently supported. Any other values other than + # `CUSTOMER` will result in an exception (4xx error). + # + # # @return [String] # # @!attribute [rw] include_raw_message @@ -2779,12 +2784,22 @@ class ContactFlowModule < Struct.new( # condition. # @return [Types::StringCondition] # + # @!attribute [rw] state_condition + # The state of the flow. + # @return [String] + # + # @!attribute [rw] status_condition + # The status of the flow. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/ContactFlowModuleSearchCriteria AWS API Documentation # class ContactFlowModuleSearchCriteria < Struct.new( :or_conditions, :and_conditions, - :string_condition) + :string_condition, + :state_condition, + :status_condition) SENSITIVE = [] include Aws::Structure end @@ -3817,6 +3832,63 @@ class CreateEvaluationFormResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] instance_id + # The identifier of the Amazon Connect instance. + # @return [String] + # + # @!attribute [rw] hours_of_operation_id + # The identifier for the hours of operation + # @return [String] + # + # @!attribute [rw] name + # The name of the hours of operation override. + # @return [String] + # + # @!attribute [rw] description + # The description of the hours of operation override. + # @return [String] + # + # @!attribute [rw] config + # Configuration information for the hours of operation override: day, + # start time, and end time. 
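#   For illustration only (placeholder times; day names as modeled in the client
#   signatures), a single config entry might look like:
#   `{ day: "MONDAY", start_time: { hours: 9, minutes: 0 }, end_time: { hours: 17, minutes: 0 } }`.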
+ # @return [Array] + # + # @!attribute [rw] effective_from + # The date from when the hours of operation override would be + # effective. + # @return [String] + # + # @!attribute [rw] effective_till + # The date until when the hours of operation override would be + # effective. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/CreateHoursOfOperationOverrideRequest AWS API Documentation + # + class CreateHoursOfOperationOverrideRequest < Struct.new( + :instance_id, + :hours_of_operation_id, + :name, + :description, + :config, + :effective_from, + :effective_till) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] hours_of_operation_override_id + # The identifier for the hours of operation override. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/CreateHoursOfOperationOverrideResponse AWS API Documentation + # + class CreateHoursOfOperationOverrideResponse < Struct.new( + :hours_of_operation_override_id) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] instance_id # The identifier of the Amazon Connect instance. You can [find the # instance ID][1] in the Amazon Resource Name (ARN) of the instance. @@ -5538,6 +5610,31 @@ class CustomerVoiceActivity < Struct.new( include Aws::Structure end + # An object to specify the hours of operation override date condition. + # + # @!attribute [rw] field_name + # An object to specify the hours of operation override date field. + # @return [String] + # + # @!attribute [rw] value + # An object to specify the hours of operation override date value. + # @return [String] + # + # @!attribute [rw] comparison_type + # An object to specify the hours of operation override date condition + # `comparisonType`. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DateCondition AWS API Documentation + # + class DateCondition < Struct.new( + :field_name, + :value, + :comparison_type) + SENSITIVE = [] + include Aws::Structure + end + # Information about a reference when the `referenceType` is `DATE`. # Otherwise, null. # @@ -5814,6 +5911,28 @@ class DeleteEvaluationFormRequest < Struct.new( include Aws::Structure end + # @!attribute [rw] instance_id + # The identifier of the Amazon Connect instance. + # @return [String] + # + # @!attribute [rw] hours_of_operation_id + # The identifier for the hours of operation. + # @return [String] + # + # @!attribute [rw] hours_of_operation_override_id + # The identifier for the hours of operation override. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DeleteHoursOfOperationOverrideRequest AWS API Documentation + # + class DeleteHoursOfOperationOverrideRequest < Struct.new( + :instance_id, + :hours_of_operation_id, + :hours_of_operation_override_id) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] instance_id # The identifier of the Amazon Connect instance. You can [find the # instance ID][1] in the Amazon Resource Name (ARN) of the instance. @@ -6579,6 +6698,40 @@ class DescribeEvaluationFormResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] instance_id + # The identifier of the Amazon Connect instance. + # @return [String] + # + # @!attribute [rw] hours_of_operation_id + # The identifier for the hours of operation. + # @return [String] + # + # @!attribute [rw] hours_of_operation_override_id + # The identifier for the hours of operation override. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DescribeHoursOfOperationOverrideRequest AWS API Documentation + # + class DescribeHoursOfOperationOverrideRequest < Struct.new( + :instance_id, + :hours_of_operation_id, + :hours_of_operation_override_id) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] hours_of_operation_override + # Information about the hours of operations override. + # @return [Types::HoursOfOperationOverride] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/DescribeHoursOfOperationOverrideResponse AWS API Documentation + # + class DescribeHoursOfOperationOverrideResponse < Struct.new( + :hours_of_operation_override) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] instance_id # The identifier of the Amazon Connect instance. You can [find the # instance ID][1] in the Amazon Resource Name (ARN) of the instance. @@ -7710,6 +7863,27 @@ class DuplicateResourceException < Struct.new( include Aws::Structure end + # Information about the hours of operations with the effective override + # applied. + # + # @!attribute [rw] date + # The date that the hours of operation or overrides applies to. + # @return [String] + # + # @!attribute [rw] operational_hours + # Information about the hours of operations with the effective + # override applied. + # @return [Array] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/EffectiveHoursOfOperations AWS API Documentation + # + class EffectiveHoursOfOperations < Struct.new( + :date, + :operational_hours) + SENSITIVE = [] + include Aws::Structure + end + # Contains information about a source or destination email address # # @!attribute [rw] email_address @@ -9564,6 +9738,50 @@ class GetCurrentUserDataResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] instance_id + # The identifier of the Amazon Connect instance. + # @return [String] + # + # @!attribute [rw] hours_of_operation_id + # The identifier for the hours of operation. + # @return [String] + # + # @!attribute [rw] from_date + # The Date from when the hours of operation are listed. + # @return [String] + # + # @!attribute [rw] to_date + # The Date until when the hours of operation are listed. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/GetEffectiveHoursOfOperationsRequest AWS API Documentation + # + class GetEffectiveHoursOfOperationsRequest < Struct.new( + :instance_id, + :hours_of_operation_id, + :from_date, + :to_date) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] effective_hours_of_operation_list + # Information about the effective hours of operations + # @return [Array] + # + # @!attribute [rw] time_zone + # The time zone for the hours of operation. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/GetEffectiveHoursOfOperationsResponse AWS API Documentation + # + class GetEffectiveHoursOfOperationsResponse < Struct.new( + :effective_hours_of_operation_list, + :time_zone) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] instance_id # The identifier of the Amazon Connect instance. You can [find the # instance ID][1] in the Amazon Resource Name (ARN) of the instance. @@ -12179,6 +12397,118 @@ class HoursOfOperationConfig < Struct.new( include Aws::Structure end + # Information about the hours of operations override. 
+ # + # @!attribute [rw] hours_of_operation_override_id + # The identifier for the hours of operation override. + # @return [String] + # + # @!attribute [rw] hours_of_operation_id + # The identifier for the hours of operation. + # @return [String] + # + # @!attribute [rw] hours_of_operation_arn + # The Amazon Resource Name (ARN) for the hours of operation. + # @return [String] + # + # @!attribute [rw] name + # The name of the hours of operation override. + # @return [String] + # + # @!attribute [rw] description + # The description of the hours of operation override. + # @return [String] + # + # @!attribute [rw] config + # Configuration information for the hours of operation override: day, + # start time, and end time. + # @return [Array] + # + # @!attribute [rw] effective_from + # The date from which the hours of operation override would be + # effective. + # @return [String] + # + # @!attribute [rw] effective_till + # The date till which the hours of operation override would be + # effective. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/HoursOfOperationOverride AWS API Documentation + # + class HoursOfOperationOverride < Struct.new( + :hours_of_operation_override_id, + :hours_of_operation_id, + :hours_of_operation_arn, + :name, + :description, + :config, + :effective_from, + :effective_till) + SENSITIVE = [] + include Aws::Structure + end + + # Information about the hours of operation override config: day, start + # time, and end time. + # + # @!attribute [rw] day + # The day that the hours of operation override applies to. + # @return [String] + # + # @!attribute [rw] start_time + # The start time when your contact center opens if overrides are + # applied. + # @return [Types::OverrideTimeSlice] + # + # @!attribute [rw] end_time + # The end time that your contact center closes if overrides are + # applied. + # @return [Types::OverrideTimeSlice] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/HoursOfOperationOverrideConfig AWS API Documentation + # + class HoursOfOperationOverrideConfig < Struct.new( + :day, + :start_time, + :end_time) + SENSITIVE = [] + include Aws::Structure + end + + # The search criteria to be used to return hours of operations + # overrides. + # + # @!attribute [rw] or_conditions + # A list of conditions which would be applied together with an OR + # condition. + # @return [Array] + # + # @!attribute [rw] and_conditions + # A list of conditions which would be applied together with an AND + # condition. + # @return [Array] + # + # @!attribute [rw] string_condition + # A leaf node condition which can be used to specify a string + # condition. + # @return [Types::StringCondition] + # + # @!attribute [rw] date_condition + # A leaf node condition which can be used to specify a date condition. + # @return [Types::DateCondition] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/HoursOfOperationOverrideSearchCriteria AWS API Documentation + # + class HoursOfOperationOverrideSearchCriteria < Struct.new( + :or_conditions, + :and_conditions, + :string_condition, + :date_condition) + SENSITIVE = [] + include Aws::Structure + end + # The search criteria to be used to return hours of operations. # # @!attribute [rw] or_conditions @@ -13766,6 +14096,66 @@ class ListFlowAssociationsResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] instance_id + # The identifier of the Amazon Connect instance. 
+ # @return [String] + # + # @!attribute [rw] hours_of_operation_id + # The identifier for the hours of operation + # @return [String] + # + # @!attribute [rw] next_token + # The token for the next set of results. Use the value returned in the + # previous response in the next request to retrieve the next set of + # results. + # @return [String] + # + # @!attribute [rw] max_results + # The maximum number of results to return per page. The default + # MaxResult size is 100. Valid Range: Minimum value of 1. Maximum + # value of 1000. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/ListHoursOfOperationOverridesRequest AWS API Documentation + # + class ListHoursOfOperationOverridesRequest < Struct.new( + :instance_id, + :hours_of_operation_id, + :next_token, + :max_results) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] next_token + # The token for the next set of results. Use the value returned in the + # previous response in the next request to retrieve the next set of + # results. + # @return [String] + # + # @!attribute [rw] hours_of_operation_override_list + # Information about the hours of operation override. + # @return [Array] + # + # @!attribute [rw] last_modified_region + # The AWS Region where this resource was last modified. + # @return [String] + # + # @!attribute [rw] last_modified_time + # The timestamp when this resource was last modified. + # @return [Time] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/ListHoursOfOperationOverridesResponse AWS API Documentation + # + class ListHoursOfOperationOverridesResponse < Struct.new( + :next_token, + :hours_of_operation_override_list, + :last_modified_region, + :last_modified_time) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] instance_id # The identifier of the Amazon Connect instance. You can [find the # instance ID][1] in the Amazon Resource Name (ARN) of the instance. @@ -16077,6 +16467,26 @@ class NumericQuestionPropertyValueAutomation < Struct.new( include Aws::Structure end + # Information about the hours of operations with the effective override + # applied. + # + # @!attribute [rw] start + # The start time that your contact center opens. + # @return [Types::OverrideTimeSlice] + # + # @!attribute [rw] end + # The end time that your contact center closes. + # @return [Types::OverrideTimeSlice] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/OperationalHour AWS API Documentation + # + class OperationalHour < Struct.new( + :start, + :end) + SENSITIVE = [] + include Aws::Structure + end + # The additional recipients information of outbound email. # # @!attribute [rw] cc_email_addresses @@ -16207,6 +16617,25 @@ class OutputTypeNotFoundException < Struct.new( include Aws::Structure end + # The start time or end time for an hours of operation override. + # + # @!attribute [rw] hours + # The hours. + # @return [Integer] + # + # @!attribute [rw] minutes + # The minutes. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/OverrideTimeSlice AWS API Documentation + # + class OverrideTimeSlice < Struct.new( + :hours, + :minutes) + SENSITIVE = [] + include Aws::Structure + end + # The configuration for the allowed video and screen sharing # capabilities for participants present over the call. 
For more # information, see [Set up in-app, web, video calling, and screen @@ -19353,6 +19782,69 @@ class SearchEmailAddressesResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] instance_id + # The identifier of the Amazon Connect instance. + # @return [String] + # + # @!attribute [rw] next_token + # The token for the next set of results. Use the value returned in the + # previous response in the next request to retrieve the next set of + # results. Length Constraints: Minimum length of 1. Maximum length of + # 2500. + # @return [String] + # + # @!attribute [rw] max_results + # The maximum number of results to return per page. Valid Range: + # Minimum value of 1. Maximum value of 100. + # @return [Integer] + # + # @!attribute [rw] search_filter + # Filters to be applied to search results. + # @return [Types::HoursOfOperationSearchFilter] + # + # @!attribute [rw] search_criteria + # The search criteria to be used to return hours of operations + # overrides. + # @return [Types::HoursOfOperationOverrideSearchCriteria] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/SearchHoursOfOperationOverridesRequest AWS API Documentation + # + class SearchHoursOfOperationOverridesRequest < Struct.new( + :instance_id, + :next_token, + :max_results, + :search_filter, + :search_criteria) + SENSITIVE = [] + include Aws::Structure + end + + # @!attribute [rw] hours_of_operation_overrides + # Information about the hours of operations overrides. + # @return [Array] + # + # @!attribute [rw] next_token + # The token for the next set of results. Use the value returned in the + # previous response in the next request to retrieve the next set of + # results. Length Constraints: Minimum length of 1. Maximum length of + # 2500. + # @return [String] + # + # @!attribute [rw] approximate_total_count + # The total number of hours of operations which matched your search + # query. + # @return [Integer] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/SearchHoursOfOperationOverridesResponse AWS API Documentation + # + class SearchHoursOfOperationOverridesResponse < Struct.new( + :hours_of_operation_overrides, + :next_token, + :approximate_total_count) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] instance_id # The identifier of the Amazon Connect instance. You can [find the # instance ID][1] in the Amazon Resource Name (ARN) of the instance. @@ -23853,6 +24345,56 @@ class UpdateEvaluationFormResponse < Struct.new( include Aws::Structure end + # @!attribute [rw] instance_id + # The identifier of the Amazon Connect instance. + # @return [String] + # + # @!attribute [rw] hours_of_operation_id + # The identifier for the hours of operation. + # @return [String] + # + # @!attribute [rw] hours_of_operation_override_id + # The identifier for the hours of operation override. + # @return [String] + # + # @!attribute [rw] name + # The name of the hours of operation override. + # @return [String] + # + # @!attribute [rw] description + # The description of the hours of operation override. + # @return [String] + # + # @!attribute [rw] config + # Configuration information for the hours of operation override: day, + # start time, and end time. + # @return [Array] + # + # @!attribute [rw] effective_from + # The date from when the hours of operation override would be + # effective. + # @return [String] + # + # @!attribute [rw] effective_till + # The date till when the hours of operation override would be + # effective. 
+ # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/connect-2017-08-08/UpdateHoursOfOperationOverrideRequest AWS API Documentation + # + class UpdateHoursOfOperationOverrideRequest < Struct.new( + :instance_id, + :hours_of_operation_id, + :hours_of_operation_override_id, + :name, + :description, + :config, + :effective_from, + :effective_till) + SENSITIVE = [] + include Aws::Structure + end + # @!attribute [rw] instance_id # The identifier of the Amazon Connect instance. You can [find the # instance ID][1] in the Amazon Resource Name (ARN) of the instance. @@ -25468,12 +26010,18 @@ class UserHierarchyGroupSearchFilter < Struct.new( # # @!attribute [rw] first_name # The first name. This is required if you are using Amazon Connect or - # SAML for identity management. + # SAML for identity management. Inputs must be in Unicode + # Normalization Form C (NFC). Text containing characters in a non-NFC + # form (for example, decomposed characters or combining marks) are not + # accepted. # @return [String] # # @!attribute [rw] last_name # The last name. This is required if you are using Amazon Connect or - # SAML for identity management. + # SAML for identity management. Inputs must be in Unicode + # Normalization Form C (NFC). Text containing characters in a non-NFC + # form (for example, decomposed characters or combining marks) are not + # accepted. # @return [String] # # @!attribute [rw] email diff --git a/gems/aws-sdk-connect/sig/client.rbs b/gems/aws-sdk-connect/sig/client.rbs index b0124dbd85a..42e624848f0 100644 --- a/gems/aws-sdk-connect/sig/client.rbs +++ b/gems/aws-sdk-connect/sig/client.rbs @@ -602,6 +602,34 @@ module Aws ) -> _CreateHoursOfOperationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateHoursOfOperationResponseSuccess + interface _CreateHoursOfOperationOverrideResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::CreateHoursOfOperationOverrideResponse] + def hours_of_operation_override_id: () -> ::String + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#create_hours_of_operation_override-instance_method + def create_hours_of_operation_override: ( + instance_id: ::String, + hours_of_operation_id: ::String, + name: ::String, + ?description: ::String, + config: Array[ + { + day: ("SUNDAY" | "MONDAY" | "TUESDAY" | "WEDNESDAY" | "THURSDAY" | "FRIDAY" | "SATURDAY")?, + start_time: { + hours: ::Integer, + minutes: ::Integer + }?, + end_time: { + hours: ::Integer, + minutes: ::Integer + }? 
+ }, + ], + effective_from: ::String, + effective_till: ::String + ) -> _CreateHoursOfOperationOverrideResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateHoursOfOperationOverrideResponseSuccess + interface _CreateInstanceResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::CreateInstanceResponse] def id: () -> ::String @@ -1169,6 +1197,14 @@ module Aws ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#delete_hours_of_operation_override-instance_method + def delete_hours_of_operation_override: ( + instance_id: ::String, + hours_of_operation_id: ::String, + hours_of_operation_override_id: ::String + ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#delete_instance-instance_method def delete_instance: ( instance_id: ::String @@ -1425,6 +1461,18 @@ module Aws ) -> _DescribeHoursOfOperationResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeHoursOfOperationResponseSuccess + interface _DescribeHoursOfOperationOverrideResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::DescribeHoursOfOperationOverrideResponse] + def hours_of_operation_override: () -> Types::HoursOfOperationOverride + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#describe_hours_of_operation_override-instance_method + def describe_hours_of_operation_override: ( + instance_id: ::String, + hours_of_operation_id: ::String, + hours_of_operation_override_id: ::String + ) -> _DescribeHoursOfOperationOverrideResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _DescribeHoursOfOperationOverrideResponseSuccess + interface _DescribeInstanceResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::DescribeInstanceResponse] def instance: () -> Types::Instance @@ -1833,6 +1881,20 @@ module Aws ) -> _GetCurrentUserDataResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetCurrentUserDataResponseSuccess + interface _GetEffectiveHoursOfOperationsResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::GetEffectiveHoursOfOperationsResponse] + def effective_hours_of_operation_list: () -> ::Array[Types::EffectiveHoursOfOperations] + def time_zone: () -> ::String + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#get_effective_hours_of_operations-instance_method + def get_effective_hours_of_operations: ( + instance_id: ::String, + hours_of_operation_id: ::String, + from_date: ::String, + to_date: ::String + ) -> _GetEffectiveHoursOfOperationsResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _GetEffectiveHoursOfOperationsResponseSuccess + interface _GetFederationTokenResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::GetFederationTokenResponse] def credentials: () -> Types::Credentials @@ -2210,6 +2272,22 @@ module Aws ) -> _ListFlowAssociationsResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListFlowAssociationsResponseSuccess + interface 
_ListHoursOfOperationOverridesResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::ListHoursOfOperationOverridesResponse] + def next_token: () -> ::String + def hours_of_operation_override_list: () -> ::Array[Types::HoursOfOperationOverride] + def last_modified_region: () -> ::String + def last_modified_time: () -> ::Time + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#list_hours_of_operation_overrides-instance_method + def list_hours_of_operation_overrides: ( + instance_id: ::String, + hours_of_operation_id: ::String, + ?next_token: ::String, + ?max_results: ::Integer + ) -> _ListHoursOfOperationOverridesResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ListHoursOfOperationOverridesResponseSuccess + interface _ListHoursOfOperationsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::ListHoursOfOperationsResponse] def hours_of_operation_summary_list: () -> ::Array[Types::HoursOfOperationSummary] @@ -2855,7 +2933,9 @@ module Aws field_name: ::String?, value: ::String?, comparison_type: ("STARTS_WITH" | "CONTAINS" | "EXACT")? - }? + }?, + state_condition: ("ACTIVE" | "ARCHIVED")?, + status_condition: ("PUBLISHED" | "SAVED")? } ) -> _SearchContactFlowModulesResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _SearchContactFlowModulesResponseSuccess @@ -3027,6 +3107,60 @@ module Aws ) -> _SearchEmailAddressesResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _SearchEmailAddressesResponseSuccess + interface _SearchHoursOfOperationOverridesResponseSuccess + include ::Seahorse::Client::_ResponseSuccess[Types::SearchHoursOfOperationOverridesResponse] + def hours_of_operation_overrides: () -> ::Array[Types::HoursOfOperationOverride] + def next_token: () -> ::String + def approximate_total_count: () -> ::Integer + end + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#search_hours_of_operation_overrides-instance_method + def search_hours_of_operation_overrides: ( + instance_id: ::String, + ?next_token: ::String, + ?max_results: ::Integer, + ?search_filter: { + tag_filter: { + or_conditions: Array[ + Array[ + { + tag_key: ::String?, + tag_value: ::String? + }, + ], + ]?, + and_conditions: Array[ + { + tag_key: ::String?, + tag_value: ::String? + }, + ]?, + tag_condition: { + tag_key: ::String?, + tag_value: ::String? + }? + }? + }, + ?search_criteria: { + or_conditions: Array[ + untyped, + ]?, + and_conditions: Array[ + untyped, + ]?, + string_condition: { + field_name: ::String?, + value: ::String?, + comparison_type: ("STARTS_WITH" | "CONTAINS" | "EXACT")? + }?, + date_condition: { + field_name: ::String?, + value: ::String?, + comparison_type: ("GREATER_THAN" | "LESS_THAN" | "GREATER_THAN_OR_EQUAL_TO" | "LESS_THAN_OR_EQUAL_TO" | "EQUAL_TO")? + }? 
+ } + ) -> _SearchHoursOfOperationOverridesResponseSuccess + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _SearchHoursOfOperationOverridesResponseSuccess + interface _SearchHoursOfOperationsResponseSuccess include ::Seahorse::Client::_ResponseSuccess[Types::SearchHoursOfOperationsResponse] def hours_of_operations: () -> ::Array[Types::HoursOfOperation] @@ -4431,6 +4565,31 @@ module Aws ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#update_hours_of_operation_override-instance_method + def update_hours_of_operation_override: ( + instance_id: ::String, + hours_of_operation_id: ::String, + hours_of_operation_override_id: ::String, + ?name: ::String, + ?description: ::String, + ?config: Array[ + { + day: ("SUNDAY" | "MONDAY" | "TUESDAY" | "WEDNESDAY" | "THURSDAY" | "FRIDAY" | "SATURDAY")?, + start_time: { + hours: ::Integer, + minutes: ::Integer + }?, + end_time: { + hours: ::Integer, + minutes: ::Integer + }? + }, + ], + ?effective_from: ::String, + ?effective_till: ::String + ) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> ::Seahorse::Client::_ResponseSuccess[::Aws::EmptyStructure] + # https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/Connect/Client.html#update_instance_attribute-instance_method def update_instance_attribute: ( instance_id: ::String, diff --git a/gems/aws-sdk-connect/sig/types.rbs b/gems/aws-sdk-connect/sig/types.rbs index 4502a71f7ab..660cfe80a55 100644 --- a/gems/aws-sdk-connect/sig/types.rbs +++ b/gems/aws-sdk-connect/sig/types.rbs @@ -656,6 +656,8 @@ module Aws::Connect attr_accessor or_conditions: ::Array[Types::ContactFlowModuleSearchCriteria] attr_accessor and_conditions: ::Array[Types::ContactFlowModuleSearchCriteria] attr_accessor string_condition: Types::StringCondition + attr_accessor state_condition: ("ACTIVE" | "ARCHIVED") + attr_accessor status_condition: ("PUBLISHED" | "SAVED") SENSITIVE: [] end @@ -896,6 +898,22 @@ module Aws::Connect SENSITIVE: [] end + class CreateHoursOfOperationOverrideRequest + attr_accessor instance_id: ::String + attr_accessor hours_of_operation_id: ::String + attr_accessor name: ::String + attr_accessor description: ::String + attr_accessor config: ::Array[Types::HoursOfOperationOverrideConfig] + attr_accessor effective_from: ::String + attr_accessor effective_till: ::String + SENSITIVE: [] + end + + class CreateHoursOfOperationOverrideResponse + attr_accessor hours_of_operation_override_id: ::String + SENSITIVE: [] + end + class CreateHoursOfOperationRequest attr_accessor instance_id: ::String attr_accessor name: ::String @@ -1297,6 +1315,13 @@ module Aws::Connect SENSITIVE: [] end + class DateCondition + attr_accessor field_name: ::String + attr_accessor value: ::String + attr_accessor comparison_type: ("GREATER_THAN" | "LESS_THAN" | "GREATER_THAN_OR_EQUAL_TO" | "LESS_THAN_OR_EQUAL_TO" | "EQUAL_TO") + SENSITIVE: [] + end + class DateReference attr_accessor name: ::String attr_accessor value: ::String @@ -1375,6 +1400,13 @@ module Aws::Connect SENSITIVE: [] end + class DeleteHoursOfOperationOverrideRequest + attr_accessor instance_id: ::String + attr_accessor hours_of_operation_id: ::String + attr_accessor hours_of_operation_override_id: ::String + SENSITIVE: [] + end + class DeleteHoursOfOperationRequest 
attr_accessor instance_id: ::String attr_accessor hours_of_operation_id: ::String @@ -1609,6 +1641,18 @@ module Aws::Connect SENSITIVE: [] end + class DescribeHoursOfOperationOverrideRequest + attr_accessor instance_id: ::String + attr_accessor hours_of_operation_id: ::String + attr_accessor hours_of_operation_override_id: ::String + SENSITIVE: [] + end + + class DescribeHoursOfOperationOverrideResponse + attr_accessor hours_of_operation_override: Types::HoursOfOperationOverride + SENSITIVE: [] + end + class DescribeHoursOfOperationRequest attr_accessor instance_id: ::String attr_accessor hours_of_operation_id: ::String @@ -1955,6 +1999,12 @@ module Aws::Connect SENSITIVE: [] end + class EffectiveHoursOfOperations + attr_accessor date: ::String + attr_accessor operational_hours: ::Array[Types::OperationalHour] + SENSITIVE: [] + end + class EmailAddressInfo attr_accessor email_address: ::String attr_accessor display_name: ::String @@ -2410,6 +2460,20 @@ module Aws::Connect SENSITIVE: [] end + class GetEffectiveHoursOfOperationsRequest + attr_accessor instance_id: ::String + attr_accessor hours_of_operation_id: ::String + attr_accessor from_date: ::String + attr_accessor to_date: ::String + SENSITIVE: [] + end + + class GetEffectiveHoursOfOperationsResponse + attr_accessor effective_hours_of_operation_list: ::Array[Types::EffectiveHoursOfOperations] + attr_accessor time_zone: ::String + SENSITIVE: [] + end + class GetFederationTokenRequest attr_accessor instance_id: ::String SENSITIVE: [] @@ -2658,6 +2722,33 @@ module Aws::Connect SENSITIVE: [] end + class HoursOfOperationOverride + attr_accessor hours_of_operation_override_id: ::String + attr_accessor hours_of_operation_id: ::String + attr_accessor hours_of_operation_arn: ::String + attr_accessor name: ::String + attr_accessor description: ::String + attr_accessor config: ::Array[Types::HoursOfOperationOverrideConfig] + attr_accessor effective_from: ::String + attr_accessor effective_till: ::String + SENSITIVE: [] + end + + class HoursOfOperationOverrideConfig + attr_accessor day: ("SUNDAY" | "MONDAY" | "TUESDAY" | "WEDNESDAY" | "THURSDAY" | "FRIDAY" | "SATURDAY") + attr_accessor start_time: Types::OverrideTimeSlice + attr_accessor end_time: Types::OverrideTimeSlice + SENSITIVE: [] + end + + class HoursOfOperationOverrideSearchCriteria + attr_accessor or_conditions: ::Array[Types::HoursOfOperationOverrideSearchCriteria] + attr_accessor and_conditions: ::Array[Types::HoursOfOperationOverrideSearchCriteria] + attr_accessor string_condition: Types::StringCondition + attr_accessor date_condition: Types::DateCondition + SENSITIVE: [] + end + class HoursOfOperationSearchCriteria attr_accessor or_conditions: ::Array[Types::HoursOfOperationSearchCriteria] attr_accessor and_conditions: ::Array[Types::HoursOfOperationSearchCriteria] @@ -3081,6 +3172,22 @@ module Aws::Connect SENSITIVE: [] end + class ListHoursOfOperationOverridesRequest + attr_accessor instance_id: ::String + attr_accessor hours_of_operation_id: ::String + attr_accessor next_token: ::String + attr_accessor max_results: ::Integer + SENSITIVE: [] + end + + class ListHoursOfOperationOverridesResponse + attr_accessor next_token: ::String + attr_accessor hours_of_operation_override_list: ::Array[Types::HoursOfOperationOverride] + attr_accessor last_modified_region: ::String + attr_accessor last_modified_time: ::Time + SENSITIVE: [] + end + class ListHoursOfOperationsRequest attr_accessor instance_id: ::String attr_accessor next_token: ::String @@ -3665,6 +3772,12 @@ module Aws::Connect 
SENSITIVE: [] end + class OperationalHour + attr_accessor start: Types::OverrideTimeSlice + attr_accessor end: Types::OverrideTimeSlice + SENSITIVE: [] + end + class OutboundAdditionalRecipients attr_accessor cc_email_addresses: ::Array[Types::EmailAddressInfo] SENSITIVE: [] end @@ -3706,6 +3819,12 @@ module Aws::Connect SENSITIVE: [] end + class OverrideTimeSlice + attr_accessor hours: ::Integer + attr_accessor minutes: ::Integer + SENSITIVE: [] + end + class ParticipantCapabilities attr_accessor video: ("SEND") attr_accessor screen_share: ("SEND") @@ -4515,6 +4634,22 @@ module Aws::Connect SENSITIVE: [] end + class SearchHoursOfOperationOverridesRequest + attr_accessor instance_id: ::String + attr_accessor next_token: ::String + attr_accessor max_results: ::Integer + attr_accessor search_filter: Types::HoursOfOperationSearchFilter + attr_accessor search_criteria: Types::HoursOfOperationOverrideSearchCriteria + SENSITIVE: [] + end + + class SearchHoursOfOperationOverridesResponse + attr_accessor hours_of_operation_overrides: ::Array[Types::HoursOfOperationOverride] + attr_accessor next_token: ::String + attr_accessor approximate_total_count: ::Integer + SENSITIVE: [] + end + class SearchHoursOfOperationsRequest attr_accessor instance_id: ::String attr_accessor next_token: ::String @@ -5545,6 +5680,18 @@ module Aws::Connect SENSITIVE: [] end + class UpdateHoursOfOperationOverrideRequest + attr_accessor instance_id: ::String + attr_accessor hours_of_operation_id: ::String + attr_accessor hours_of_operation_override_id: ::String + attr_accessor name: ::String + attr_accessor description: ::String + attr_accessor config: ::Array[Types::HoursOfOperationOverrideConfig] + attr_accessor effective_from: ::String + attr_accessor effective_till: ::String + SENSITIVE: [] + end + class UpdateHoursOfOperationRequest attr_accessor instance_id: ::String attr_accessor hours_of_operation_id: ::String diff --git a/gems/aws-sdk-databasemigrationservice/CHANGELOG.md b/gems/aws-sdk-databasemigrationservice/CHANGELOG.md index ba6882e537b..a0215374138 100644 --- a/gems/aws-sdk-databasemigrationservice/CHANGELOG.md +++ b/gems/aws-sdk-databasemigrationservice/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.111.0 (2024-12-12) +------------------ + +* Feature - Add parameters to support Kerberos authentication. Add parameter for disabling the Unicode source filter with PostgreSQL settings. Add parameter to use large integer value with Kinesis/Kafka settings.
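The 1.111.0 feature entry above corresponds to new endpoint settings that appear throughout the client changes below. A hedged sketch of passing them to Aws::DatabaseMigrationService::Client#create_endpoint, with every identifier and ARN a placeholder:

    require 'aws-sdk-databasemigrationservice'

    dms = Aws::DatabaseMigrationService::Client.new(region: 'us-east-1')

    # Kinesis target that keeps large integer values intact.
    dms.create_endpoint(
      endpoint_identifier: 'example-kinesis-target',                          # placeholder
      endpoint_type: 'target',
      engine_name: 'kinesis',
      kinesis_settings: {
        stream_arn: 'arn:aws:kinesis:us-east-1:111122223333:stream/example',  # placeholder
        service_access_role_arn: 'arn:aws:iam::111122223333:role/example',    # placeholder
        message_format: 'json',
        use_large_integer_value: true
      }
    )

    # PostgreSQL source with the Unicode source filter disabled
    # (connection attributes omitted for brevity).
    dms.create_endpoint(
      endpoint_identifier: 'example-postgres-source',                         # placeholder
      endpoint_type: 'source',
      engine_name: 'postgres',
      postgre_sql_settings: {
        disable_unicode_source_filter: true
      }
    )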
+ 1.110.0 (2024-11-06) ------------------ diff --git a/gems/aws-sdk-databasemigrationservice/VERSION b/gems/aws-sdk-databasemigrationservice/VERSION index 1916b6b587d..d313a193da0 100644 --- a/gems/aws-sdk-databasemigrationservice/VERSION +++ b/gems/aws-sdk-databasemigrationservice/VERSION @@ -1 +1 @@ -1.110.0 +1.111.0 diff --git a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice.rb b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice.rb index a273b7c057d..b79333031d4 100644 --- a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice.rb +++ b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice.rb @@ -55,7 +55,7 @@ module Plugins autoload :EndpointProvider, 'aws-sdk-databasemigrationservice/endpoint_provider' autoload :Endpoints, 'aws-sdk-databasemigrationservice/endpoints' - GEM_VERSION = '1.110.0' + GEM_VERSION = '1.111.0' end diff --git a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client.rb b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client.rb index 21482eabeed..057971f7ef9 100644 --- a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client.rb +++ b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client.rb @@ -1425,6 +1425,7 @@ def create_data_provider(params = {}, options = {}) # include_control_details: false, # include_null_and_empty: false, # no_hex_prefix: false, + # use_large_integer_value: false, # }, # kafka_settings: { # broker: "String", @@ -1447,6 +1448,7 @@ def create_data_provider(params = {}, options = {}) # no_hex_prefix: false, # sasl_mechanism: "scram-sha-512", # accepts scram-sha-512, plain # ssl_endpoint_identification_algorithm: "none", # accepts none, https + # use_large_integer_value: false, # }, # elasticsearch_settings: { # service_access_role_arn: "String", # required @@ -1522,6 +1524,7 @@ def create_data_provider(params = {}, options = {}) # map_long_varchar_as: "wstring", # accepts wstring, clob, nclob # database_mode: "default", # accepts default, babelfish # babelfish_database_name: "String", + # disable_unicode_source_filter: false, # }, # my_sql_settings: { # after_connect_script: "String", @@ -1584,6 +1587,7 @@ def create_data_provider(params = {}, options = {}) # trim_space_in_char: false, # convert_timestamp_with_zone_to_utc: false, # open_transaction_window: 1, + # authentication_method: "password", # accepts password, kerberos # }, # sybase_settings: { # database_name: "String", @@ -1612,6 +1616,7 @@ def create_data_provider(params = {}, options = {}) # trim_space_in_char: false, # tlog_access_mode: "BackupOnly", # accepts BackupOnly, PreferBackup, PreferTlog, TlogOnly # force_lob_lookup: false, + # authentication_method: "password", # accepts password, kerberos # }, # ibm_db_2_settings: { # database_name: "String", @@ -1768,6 +1773,7 @@ def create_data_provider(params = {}, options = {}) # resp.endpoint.kinesis_settings.include_control_details #=> Boolean # resp.endpoint.kinesis_settings.include_null_and_empty #=> Boolean # resp.endpoint.kinesis_settings.no_hex_prefix #=> Boolean + # resp.endpoint.kinesis_settings.use_large_integer_value #=> Boolean # resp.endpoint.kafka_settings.broker #=> String # resp.endpoint.kafka_settings.topic #=> String # resp.endpoint.kafka_settings.message_format #=> String, one of "json", "json-unformatted" @@ -1788,6 +1794,7 @@ def create_data_provider(params = {}, options = {}) # 
resp.endpoint.kafka_settings.no_hex_prefix #=> Boolean # resp.endpoint.kafka_settings.sasl_mechanism #=> String, one of "scram-sha-512", "plain" # resp.endpoint.kafka_settings.ssl_endpoint_identification_algorithm #=> String, one of "none", "https" + # resp.endpoint.kafka_settings.use_large_integer_value #=> Boolean # resp.endpoint.elasticsearch_settings.service_access_role_arn #=> String # resp.endpoint.elasticsearch_settings.endpoint_uri #=> String # resp.endpoint.elasticsearch_settings.full_load_error_percentage #=> Integer @@ -1855,6 +1862,7 @@ def create_data_provider(params = {}, options = {}) # resp.endpoint.postgre_sql_settings.map_long_varchar_as #=> String, one of "wstring", "clob", "nclob" # resp.endpoint.postgre_sql_settings.database_mode #=> String, one of "default", "babelfish" # resp.endpoint.postgre_sql_settings.babelfish_database_name #=> String + # resp.endpoint.postgre_sql_settings.disable_unicode_source_filter #=> Boolean # resp.endpoint.my_sql_settings.after_connect_script #=> String # resp.endpoint.my_sql_settings.clean_source_metadata_on_mismatch #=> Boolean # resp.endpoint.my_sql_settings.database_name #=> String @@ -1914,6 +1922,7 @@ def create_data_provider(params = {}, options = {}) # resp.endpoint.oracle_settings.trim_space_in_char #=> Boolean # resp.endpoint.oracle_settings.convert_timestamp_with_zone_to_utc #=> Boolean # resp.endpoint.oracle_settings.open_transaction_window #=> Integer + # resp.endpoint.oracle_settings.authentication_method #=> String, one of "password", "kerberos" # resp.endpoint.sybase_settings.database_name #=> String # resp.endpoint.sybase_settings.password #=> String # resp.endpoint.sybase_settings.port #=> Integer @@ -1938,6 +1947,7 @@ def create_data_provider(params = {}, options = {}) # resp.endpoint.microsoft_sql_server_settings.trim_space_in_char #=> Boolean # resp.endpoint.microsoft_sql_server_settings.tlog_access_mode #=> String, one of "BackupOnly", "PreferBackup", "PreferTlog", "TlogOnly" # resp.endpoint.microsoft_sql_server_settings.force_lob_lookup #=> Boolean + # resp.endpoint.microsoft_sql_server_settings.authentication_method #=> String, one of "password", "kerberos" # resp.endpoint.ibm_db_2_settings.database_name #=> String # resp.endpoint.ibm_db_2_settings.password #=> String # resp.endpoint.ibm_db_2_settings.port #=> Integer @@ -2737,6 +2747,10 @@ def create_replication_config(params = {}, options = {}) # as IPv4 only or Dual-stack that supports both IPv4 and IPv6 # addressing. IPv6 only is not yet supported. # + # @option params [Types::KerberosAuthenticationSettings] :kerberos_authentication_settings + # Specifies the ID of the secret that stores the key cache file required + # for kerberos authentication, when creating a replication instance. 
+ # # @return [Types::CreateReplicationInstanceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::CreateReplicationInstanceResponse#replication_instance #replication_instance} => Types::ReplicationInstance @@ -2827,7 +2841,7 @@ def create_replication_config(params = {}, options = {}) # resp = client.create_replication_instance({ # replication_instance_identifier: "String", # required # allocated_storage: 1, - # replication_instance_class: "String", # required + # replication_instance_class: "ReplicationInstanceClass", # required # vpc_security_group_ids: ["String"], # availability_zone: "String", # replication_subnet_group_identifier: "String", @@ -2847,6 +2861,11 @@ def create_replication_config(params = {}, options = {}) # dns_name_servers: "String", # resource_identifier: "String", # network_type: "String", + # kerberos_authentication_settings: { + # key_cache_secret_id: "String", + # key_cache_secret_iam_arn: "String", + # krb_5_file_contents: "String", + # }, # }) # # @example Response structure @@ -2894,6 +2913,9 @@ def create_replication_config(params = {}, options = {}) # resp.replication_instance.free_until #=> Time # resp.replication_instance.dns_name_servers #=> String # resp.replication_instance.network_type #=> String + # resp.replication_instance.kerberos_authentication_settings.key_cache_secret_id #=> String + # resp.replication_instance.kerberos_authentication_settings.key_cache_secret_iam_arn #=> String + # resp.replication_instance.kerberos_authentication_settings.krb_5_file_contents #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CreateReplicationInstance AWS API Documentation # @@ -3639,6 +3661,7 @@ def delete_data_provider(params = {}, options = {}) # resp.endpoint.kinesis_settings.include_control_details #=> Boolean # resp.endpoint.kinesis_settings.include_null_and_empty #=> Boolean # resp.endpoint.kinesis_settings.no_hex_prefix #=> Boolean + # resp.endpoint.kinesis_settings.use_large_integer_value #=> Boolean # resp.endpoint.kafka_settings.broker #=> String # resp.endpoint.kafka_settings.topic #=> String # resp.endpoint.kafka_settings.message_format #=> String, one of "json", "json-unformatted" @@ -3659,6 +3682,7 @@ def delete_data_provider(params = {}, options = {}) # resp.endpoint.kafka_settings.no_hex_prefix #=> Boolean # resp.endpoint.kafka_settings.sasl_mechanism #=> String, one of "scram-sha-512", "plain" # resp.endpoint.kafka_settings.ssl_endpoint_identification_algorithm #=> String, one of "none", "https" + # resp.endpoint.kafka_settings.use_large_integer_value #=> Boolean # resp.endpoint.elasticsearch_settings.service_access_role_arn #=> String # resp.endpoint.elasticsearch_settings.endpoint_uri #=> String # resp.endpoint.elasticsearch_settings.full_load_error_percentage #=> Integer @@ -3726,6 +3750,7 @@ def delete_data_provider(params = {}, options = {}) # resp.endpoint.postgre_sql_settings.map_long_varchar_as #=> String, one of "wstring", "clob", "nclob" # resp.endpoint.postgre_sql_settings.database_mode #=> String, one of "default", "babelfish" # resp.endpoint.postgre_sql_settings.babelfish_database_name #=> String + # resp.endpoint.postgre_sql_settings.disable_unicode_source_filter #=> Boolean # resp.endpoint.my_sql_settings.after_connect_script #=> String # resp.endpoint.my_sql_settings.clean_source_metadata_on_mismatch #=> Boolean # resp.endpoint.my_sql_settings.database_name #=> String @@ -3785,6 +3810,7 @@ def delete_data_provider(params = {}, options = {}) 
# resp.endpoint.oracle_settings.trim_space_in_char #=> Boolean # resp.endpoint.oracle_settings.convert_timestamp_with_zone_to_utc #=> Boolean # resp.endpoint.oracle_settings.open_transaction_window #=> Integer + # resp.endpoint.oracle_settings.authentication_method #=> String, one of "password", "kerberos" # resp.endpoint.sybase_settings.database_name #=> String # resp.endpoint.sybase_settings.password #=> String # resp.endpoint.sybase_settings.port #=> Integer @@ -3809,6 +3835,7 @@ def delete_data_provider(params = {}, options = {}) # resp.endpoint.microsoft_sql_server_settings.trim_space_in_char #=> Boolean # resp.endpoint.microsoft_sql_server_settings.tlog_access_mode #=> String, one of "BackupOnly", "PreferBackup", "PreferTlog", "TlogOnly" # resp.endpoint.microsoft_sql_server_settings.force_lob_lookup #=> Boolean + # resp.endpoint.microsoft_sql_server_settings.authentication_method #=> String, one of "password", "kerberos" # resp.endpoint.ibm_db_2_settings.database_name #=> String # resp.endpoint.ibm_db_2_settings.password #=> String # resp.endpoint.ibm_db_2_settings.port #=> Integer @@ -4276,6 +4303,9 @@ def delete_replication_config(params = {}, options = {}) # resp.replication_instance.free_until #=> Time # resp.replication_instance.dns_name_servers #=> String # resp.replication_instance.network_type #=> String + # resp.replication_instance.kerberos_authentication_settings.key_cache_secret_id #=> String + # resp.replication_instance.kerberos_authentication_settings.key_cache_secret_iam_arn #=> String + # resp.replication_instance.kerberos_authentication_settings.krb_5_file_contents #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DeleteReplicationInstance AWS API Documentation # @@ -4946,7 +4976,8 @@ def describe_data_migrations(params = {}, options = {}) # Filters applied to the data providers described in the form of # key-value pairs. # - # Valid filter names: data-provider-identifier + # Valid filter names and values: data-provider-identifier, data provider + # arn or name # # @option params [Integer] :max_records # The maximum number of records to include in the response. 
If more @@ -5389,6 +5420,7 @@ def describe_endpoint_types(params = {}, options = {}) # resp.endpoints[0].kinesis_settings.include_control_details #=> Boolean # resp.endpoints[0].kinesis_settings.include_null_and_empty #=> Boolean # resp.endpoints[0].kinesis_settings.no_hex_prefix #=> Boolean + # resp.endpoints[0].kinesis_settings.use_large_integer_value #=> Boolean # resp.endpoints[0].kafka_settings.broker #=> String # resp.endpoints[0].kafka_settings.topic #=> String # resp.endpoints[0].kafka_settings.message_format #=> String, one of "json", "json-unformatted" @@ -5409,6 +5441,7 @@ def describe_endpoint_types(params = {}, options = {}) # resp.endpoints[0].kafka_settings.no_hex_prefix #=> Boolean # resp.endpoints[0].kafka_settings.sasl_mechanism #=> String, one of "scram-sha-512", "plain" # resp.endpoints[0].kafka_settings.ssl_endpoint_identification_algorithm #=> String, one of "none", "https" + # resp.endpoints[0].kafka_settings.use_large_integer_value #=> Boolean # resp.endpoints[0].elasticsearch_settings.service_access_role_arn #=> String # resp.endpoints[0].elasticsearch_settings.endpoint_uri #=> String # resp.endpoints[0].elasticsearch_settings.full_load_error_percentage #=> Integer @@ -5476,6 +5509,7 @@ def describe_endpoint_types(params = {}, options = {}) # resp.endpoints[0].postgre_sql_settings.map_long_varchar_as #=> String, one of "wstring", "clob", "nclob" # resp.endpoints[0].postgre_sql_settings.database_mode #=> String, one of "default", "babelfish" # resp.endpoints[0].postgre_sql_settings.babelfish_database_name #=> String + # resp.endpoints[0].postgre_sql_settings.disable_unicode_source_filter #=> Boolean # resp.endpoints[0].my_sql_settings.after_connect_script #=> String # resp.endpoints[0].my_sql_settings.clean_source_metadata_on_mismatch #=> Boolean # resp.endpoints[0].my_sql_settings.database_name #=> String @@ -5535,6 +5569,7 @@ def describe_endpoint_types(params = {}, options = {}) # resp.endpoints[0].oracle_settings.trim_space_in_char #=> Boolean # resp.endpoints[0].oracle_settings.convert_timestamp_with_zone_to_utc #=> Boolean # resp.endpoints[0].oracle_settings.open_transaction_window #=> Integer + # resp.endpoints[0].oracle_settings.authentication_method #=> String, one of "password", "kerberos" # resp.endpoints[0].sybase_settings.database_name #=> String # resp.endpoints[0].sybase_settings.password #=> String # resp.endpoints[0].sybase_settings.port #=> Integer @@ -5559,6 +5594,7 @@ def describe_endpoint_types(params = {}, options = {}) # resp.endpoints[0].microsoft_sql_server_settings.trim_space_in_char #=> Boolean # resp.endpoints[0].microsoft_sql_server_settings.tlog_access_mode #=> String, one of "BackupOnly", "PreferBackup", "PreferTlog", "TlogOnly" # resp.endpoints[0].microsoft_sql_server_settings.force_lob_lookup #=> Boolean + # resp.endpoints[0].microsoft_sql_server_settings.authentication_method #=> String, one of "password", "kerberos" # resp.endpoints[0].ibm_db_2_settings.database_name #=> String # resp.endpoints[0].ibm_db_2_settings.password #=> String # resp.endpoints[0].ibm_db_2_settings.port #=> Integer @@ -6359,6 +6395,9 @@ def describe_fleet_advisor_schemas(params = {}, options = {}) # Filters applied to the instance profiles described in the form of # key-value pairs. # + # Valid filter names and values: instance-profile-identifier, instance + # profile arn or name + # # @option params [Integer] :max_records # The maximum number of records to include in the response. 
If more # records exist than the specified `MaxRecords` value, DMS includes a @@ -6917,6 +6956,14 @@ def describe_metadata_model_imports(params = {}, options = {}) # Filters applied to the migration projects described in the form of # key-value pairs. # + # Valid filter names and values: + # + # * instance-profile-identifier, instance profile arn or name + # + # * data-provider-identifier, data provider arn or name + # + # * migration-project-identifier, migration project arn or name + # # @option params [Integer] :max_records # The maximum number of records to include in the response. If more # records exist than the specified `MaxRecords` value, DMS includes a @@ -7613,6 +7660,9 @@ def describe_replication_instance_task_logs(params = {}, options = {}) # resp.replication_instances[0].free_until #=> Time # resp.replication_instances[0].dns_name_servers #=> String # resp.replication_instances[0].network_type #=> String + # resp.replication_instances[0].kerberos_authentication_settings.key_cache_secret_id #=> String + # resp.replication_instances[0].kerberos_authentication_settings.key_cache_secret_iam_arn #=> String + # resp.replication_instances[0].kerberos_authentication_settings.krb_5_file_contents #=> String # # # The following waiters are defined for this operation (see {Client#wait_until} for detailed usage): @@ -9384,6 +9434,7 @@ def modify_data_provider(params = {}, options = {}) # include_control_details: false, # include_null_and_empty: false, # no_hex_prefix: false, + # use_large_integer_value: false, # }, # kafka_settings: { # broker: "String", @@ -9406,6 +9457,7 @@ def modify_data_provider(params = {}, options = {}) # no_hex_prefix: false, # sasl_mechanism: "scram-sha-512", # accepts scram-sha-512, plain # ssl_endpoint_identification_algorithm: "none", # accepts none, https + # use_large_integer_value: false, # }, # elasticsearch_settings: { # service_access_role_arn: "String", # required @@ -9481,6 +9533,7 @@ def modify_data_provider(params = {}, options = {}) # map_long_varchar_as: "wstring", # accepts wstring, clob, nclob # database_mode: "default", # accepts default, babelfish # babelfish_database_name: "String", + # disable_unicode_source_filter: false, # }, # my_sql_settings: { # after_connect_script: "String", @@ -9543,6 +9596,7 @@ def modify_data_provider(params = {}, options = {}) # trim_space_in_char: false, # convert_timestamp_with_zone_to_utc: false, # open_transaction_window: 1, + # authentication_method: "password", # accepts password, kerberos # }, # sybase_settings: { # database_name: "String", @@ -9571,6 +9625,7 @@ def modify_data_provider(params = {}, options = {}) # trim_space_in_char: false, # tlog_access_mode: "BackupOnly", # accepts BackupOnly, PreferBackup, PreferTlog, TlogOnly # force_lob_lookup: false, + # authentication_method: "password", # accepts password, kerberos # }, # ibm_db_2_settings: { # database_name: "String", @@ -9727,6 +9782,7 @@ def modify_data_provider(params = {}, options = {}) # resp.endpoint.kinesis_settings.include_control_details #=> Boolean # resp.endpoint.kinesis_settings.include_null_and_empty #=> Boolean # resp.endpoint.kinesis_settings.no_hex_prefix #=> Boolean + # resp.endpoint.kinesis_settings.use_large_integer_value #=> Boolean # resp.endpoint.kafka_settings.broker #=> String # resp.endpoint.kafka_settings.topic #=> String # resp.endpoint.kafka_settings.message_format #=> String, one of "json", "json-unformatted" @@ -9747,6 +9803,7 @@ def modify_data_provider(params = {}, options = {}) # 
resp.endpoint.kafka_settings.no_hex_prefix #=> Boolean # resp.endpoint.kafka_settings.sasl_mechanism #=> String, one of "scram-sha-512", "plain" # resp.endpoint.kafka_settings.ssl_endpoint_identification_algorithm #=> String, one of "none", "https" + # resp.endpoint.kafka_settings.use_large_integer_value #=> Boolean # resp.endpoint.elasticsearch_settings.service_access_role_arn #=> String # resp.endpoint.elasticsearch_settings.endpoint_uri #=> String # resp.endpoint.elasticsearch_settings.full_load_error_percentage #=> Integer @@ -9814,6 +9871,7 @@ def modify_data_provider(params = {}, options = {}) # resp.endpoint.postgre_sql_settings.map_long_varchar_as #=> String, one of "wstring", "clob", "nclob" # resp.endpoint.postgre_sql_settings.database_mode #=> String, one of "default", "babelfish" # resp.endpoint.postgre_sql_settings.babelfish_database_name #=> String + # resp.endpoint.postgre_sql_settings.disable_unicode_source_filter #=> Boolean # resp.endpoint.my_sql_settings.after_connect_script #=> String # resp.endpoint.my_sql_settings.clean_source_metadata_on_mismatch #=> Boolean # resp.endpoint.my_sql_settings.database_name #=> String @@ -9873,6 +9931,7 @@ def modify_data_provider(params = {}, options = {}) # resp.endpoint.oracle_settings.trim_space_in_char #=> Boolean # resp.endpoint.oracle_settings.convert_timestamp_with_zone_to_utc #=> Boolean # resp.endpoint.oracle_settings.open_transaction_window #=> Integer + # resp.endpoint.oracle_settings.authentication_method #=> String, one of "password", "kerberos" # resp.endpoint.sybase_settings.database_name #=> String # resp.endpoint.sybase_settings.password #=> String # resp.endpoint.sybase_settings.port #=> Integer @@ -9897,6 +9956,7 @@ def modify_data_provider(params = {}, options = {}) # resp.endpoint.microsoft_sql_server_settings.trim_space_in_char #=> Boolean # resp.endpoint.microsoft_sql_server_settings.tlog_access_mode #=> String, one of "BackupOnly", "PreferBackup", "PreferTlog", "TlogOnly" # resp.endpoint.microsoft_sql_server_settings.force_lob_lookup #=> Boolean + # resp.endpoint.microsoft_sql_server_settings.authentication_method #=> String, one of "password", "kerberos" # resp.endpoint.ibm_db_2_settings.database_name #=> String # resp.endpoint.ibm_db_2_settings.password #=> String # resp.endpoint.ibm_db_2_settings.port #=> Integer @@ -10479,6 +10539,10 @@ def modify_replication_config(params = {}, options = {}) # as IPv4 only or Dual-stack that supports both IPv4 and IPv6 # addressing. IPv6 only is not yet supported. # + # @option params [Types::KerberosAuthenticationSettings] :kerberos_authentication_settings + # Specifies the ID of the secret that stores the key cache file required + # for kerberos authentication, when modifying a replication instance. 
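The instance-level Kerberos settings above pair with the endpoint-level `AuthenticationMethod` fields this release adds for Oracle and Microsoft SQL Server. A hedged sketch of a SQL Server source endpoint opting into Kerberos; identifiers are placeholders, and the pairing with the instance settings is inferred from the option names rather than stated in this diff:

    require 'aws-sdk-databasemigrationservice'

    dms = Aws::DatabaseMigrationService::Client.new(region: 'us-east-1')

    dms.create_endpoint(
      endpoint_identifier: 'example-sqlserver-source',   # placeholder
      endpoint_type: 'source',
      engine_name: 'sqlserver',
      microsoft_sql_server_settings: {
        server_name: 'sql.example.internal',             # placeholder
        port: 1433,
        database_name: 'ExampleDB',                      # placeholder
        authentication_method: 'kerberos'                # the other accepted value is 'password'
      }
    )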
+ # # @return [Types::ModifyReplicationInstanceResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::ModifyReplicationInstanceResponse#replication_instance #replication_instance} => Types::ReplicationInstance @@ -10564,7 +10628,7 @@ def modify_replication_config(params = {}, options = {}) # replication_instance_arn: "String", # required # allocated_storage: 1, # apply_immediately: false, - # replication_instance_class: "String", + # replication_instance_class: "ReplicationInstanceClass", # vpc_security_group_ids: ["String"], # preferred_maintenance_window: "String", # multi_az: false, @@ -10573,6 +10637,11 @@ def modify_replication_config(params = {}, options = {}) # auto_minor_version_upgrade: false, # replication_instance_identifier: "String", # network_type: "String", + # kerberos_authentication_settings: { + # key_cache_secret_id: "String", + # key_cache_secret_iam_arn: "String", + # krb_5_file_contents: "String", + # }, # }) # # @example Response structure @@ -10620,6 +10689,9 @@ def modify_replication_config(params = {}, options = {}) # resp.replication_instance.free_until #=> Time # resp.replication_instance.dns_name_servers #=> String # resp.replication_instance.network_type #=> String + # resp.replication_instance.kerberos_authentication_settings.key_cache_secret_id #=> String + # resp.replication_instance.kerberos_authentication_settings.key_cache_secret_iam_arn #=> String + # resp.replication_instance.kerberos_authentication_settings.krb_5_file_contents #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/ModifyReplicationInstance AWS API Documentation # @@ -10987,6 +11059,9 @@ def move_replication_task(params = {}, options = {}) # resp.replication_instance.free_until #=> Time # resp.replication_instance.dns_name_servers #=> String # resp.replication_instance.network_type #=> String + # resp.replication_instance.kerberos_authentication_settings.key_cache_secret_id #=> String + # resp.replication_instance.kerberos_authentication_settings.key_cache_secret_iam_arn #=> String + # resp.replication_instance.kerberos_authentication_settings.krb_5_file_contents #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/RebootReplicationInstance AWS API Documentation # @@ -11669,6 +11744,23 @@ def start_recommendations(params = {}, options = {}) # @option params [required, String] :start_replication_type # The replication type. # + # When the replication type is `full-load` or `full-load-and-cdc`, the + # only valid value for the first run of the replication is + # `start-replication`. This option will start the replication. + # + # You can also use ReloadTables to reload specific tables that failed + # during replication instead of restarting the replication. + # + # The `resume-processing` option isn't applicable for a full-load + # replication, because you can't resume partially loaded tables during + # the full load phase. + # + # For a `full-load-and-cdc` replication, DMS migrates table data, and + # then applies data changes that occur on the source. To load all the + # tables again, and start capturing source changes, use `reload-target`. + # Otherwise use `resume-processing`, to replicate the changes from the + # last stop position. + # # @option params [Time,DateTime,Date,Integer,String] :cdc_start_time # Indicates the start time for a change data capture (CDC) operation. 
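The expanded `:start_replication_type` documentation above separates the first run of a full-load replication from later resumptions. A brief sketch of the corresponding calls; the replication config ARN is a placeholder and the parameter names follow the options documented in this hunk:

    require 'aws-sdk-databasemigrationservice'

    dms = Aws::DatabaseMigrationService::Client.new(region: 'us-east-1')
    config_arn = 'arn:aws:dms:us-east-1:111122223333:replication-config:EXAMPLE'  # placeholder

    # First run of a full-load or full-load-and-cdc replication.
    dms.start_replication(
      replication_config_arn: config_arn,
      start_replication_type: 'start-replication'
    )

    # Later, resume change processing from the last stop position
    # instead of reloading every table.
    dms.start_replication(
      replication_config_arn: config_arn,
      start_replication_type: 'resume-processing'
    )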
# Use either `CdcStartTime` or `CdcStartPosition` to specify when you @@ -12445,7 +12537,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-databasemigrationservice' - context[:gem_version] = '1.110.0' + context[:gem_version] = '1.111.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client_api.rb b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client_api.rb index 7ab1fbf71ab..adb67bd6742 100644 --- a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client_api.rb +++ b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/client_api.rb @@ -282,6 +282,7 @@ module ClientApi KafkaSecurityProtocol = Shapes::StringShape.new(name: 'KafkaSecurityProtocol') KafkaSettings = Shapes::StructureShape.new(name: 'KafkaSettings') KafkaSslEndpointIdentificationAlgorithm = Shapes::StringShape.new(name: 'KafkaSslEndpointIdentificationAlgorithm') + KerberosAuthenticationSettings = Shapes::StructureShape.new(name: 'KerberosAuthenticationSettings') KeyList = Shapes::ListShape.new(name: 'KeyList') KinesisSettings = Shapes::StructureShape.new(name: 'KinesisSettings') Limitation = Shapes::StructureShape.new(name: 'Limitation') @@ -329,6 +330,7 @@ module ClientApi MySqlDataProviderSettings = Shapes::StructureShape.new(name: 'MySqlDataProviderSettings') NeptuneSettings = Shapes::StructureShape.new(name: 'NeptuneSettings') NestingLevelValue = Shapes::StringShape.new(name: 'NestingLevelValue') + OracleAuthenticationMethod = Shapes::StringShape.new(name: 'OracleAuthenticationMethod') OracleDataProviderSettings = Shapes::StructureShape.new(name: 'OracleDataProviderSettings') OracleSettings = Shapes::StructureShape.new(name: 'OracleSettings') OrderableReplicationInstance = Shapes::StructureShape.new(name: 'OrderableReplicationInstance') @@ -373,6 +375,7 @@ module ClientApi ReplicationConfigList = Shapes::ListShape.new(name: 'ReplicationConfigList') ReplicationEndpointTypeValue = Shapes::StringShape.new(name: 'ReplicationEndpointTypeValue') ReplicationInstance = Shapes::StructureShape.new(name: 'ReplicationInstance') + ReplicationInstanceClass = Shapes::StringShape.new(name: 'ReplicationInstanceClass') ReplicationInstanceIpv6AddressList = Shapes::ListShape.new(name: 'ReplicationInstanceIpv6AddressList') ReplicationInstanceList = Shapes::ListShape.new(name: 'ReplicationInstanceList') ReplicationInstancePrivateIpAddressList = Shapes::ListShape.new(name: 'ReplicationInstancePrivateIpAddressList') @@ -421,6 +424,7 @@ module ClientApi SourceDataSettings = Shapes::ListShape.new(name: 'SourceDataSettings') SourceIdsList = Shapes::ListShape.new(name: 'SourceIdsList') SourceType = Shapes::StringShape.new(name: 'SourceType') + SqlServerAuthenticationMethod = Shapes::StringShape.new(name: 'SqlServerAuthenticationMethod') SslSecurityProtocolValue = Shapes::StringShape.new(name: 'SslSecurityProtocolValue') StartDataMigrationMessage = Shapes::StructureShape.new(name: 'StartDataMigrationMessage') StartDataMigrationResponse = Shapes::StructureShape.new(name: 'StartDataMigrationResponse') @@ -739,7 +743,7 @@ module ClientApi CreateReplicationInstanceMessage.add_member(:replication_instance_identifier, Shapes::ShapeRef.new(shape: String, required: true, location_name: "ReplicationInstanceIdentifier")) CreateReplicationInstanceMessage.add_member(:allocated_storage, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: 
"AllocatedStorage")) - CreateReplicationInstanceMessage.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: String, required: true, location_name: "ReplicationInstanceClass")) + CreateReplicationInstanceMessage.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: ReplicationInstanceClass, required: true, location_name: "ReplicationInstanceClass")) CreateReplicationInstanceMessage.add_member(:vpc_security_group_ids, Shapes::ShapeRef.new(shape: VpcSecurityGroupIdList, location_name: "VpcSecurityGroupIds")) CreateReplicationInstanceMessage.add_member(:availability_zone, Shapes::ShapeRef.new(shape: String, location_name: "AvailabilityZone")) CreateReplicationInstanceMessage.add_member(:replication_subnet_group_identifier, Shapes::ShapeRef.new(shape: String, location_name: "ReplicationSubnetGroupIdentifier")) @@ -753,6 +757,7 @@ module ClientApi CreateReplicationInstanceMessage.add_member(:dns_name_servers, Shapes::ShapeRef.new(shape: String, location_name: "DnsNameServers")) CreateReplicationInstanceMessage.add_member(:resource_identifier, Shapes::ShapeRef.new(shape: String, location_name: "ResourceIdentifier")) CreateReplicationInstanceMessage.add_member(:network_type, Shapes::ShapeRef.new(shape: String, location_name: "NetworkType")) + CreateReplicationInstanceMessage.add_member(:kerberos_authentication_settings, Shapes::ShapeRef.new(shape: KerberosAuthenticationSettings, location_name: "KerberosAuthenticationSettings")) CreateReplicationInstanceMessage.struct_class = Types::CreateReplicationInstanceMessage CreateReplicationInstanceResponse.add_member(:replication_instance, Shapes::ShapeRef.new(shape: ReplicationInstance, location_name: "ReplicationInstance")) @@ -1702,8 +1707,14 @@ module ClientApi KafkaSettings.add_member(:no_hex_prefix, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "NoHexPrefix")) KafkaSettings.add_member(:sasl_mechanism, Shapes::ShapeRef.new(shape: KafkaSaslMechanism, location_name: "SaslMechanism")) KafkaSettings.add_member(:ssl_endpoint_identification_algorithm, Shapes::ShapeRef.new(shape: KafkaSslEndpointIdentificationAlgorithm, location_name: "SslEndpointIdentificationAlgorithm")) + KafkaSettings.add_member(:use_large_integer_value, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "UseLargeIntegerValue")) KafkaSettings.struct_class = Types::KafkaSettings + KerberosAuthenticationSettings.add_member(:key_cache_secret_id, Shapes::ShapeRef.new(shape: String, location_name: "KeyCacheSecretId")) + KerberosAuthenticationSettings.add_member(:key_cache_secret_iam_arn, Shapes::ShapeRef.new(shape: String, location_name: "KeyCacheSecretIamArn")) + KerberosAuthenticationSettings.add_member(:krb_5_file_contents, Shapes::ShapeRef.new(shape: String, location_name: "Krb5FileContents")) + KerberosAuthenticationSettings.struct_class = Types::KerberosAuthenticationSettings + KeyList.member = Shapes::ShapeRef.new(shape: String) KinesisSettings.add_member(:stream_arn, Shapes::ShapeRef.new(shape: String, location_name: "StreamArn")) @@ -1716,6 +1727,7 @@ module ClientApi KinesisSettings.add_member(:include_control_details, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "IncludeControlDetails")) KinesisSettings.add_member(:include_null_and_empty, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "IncludeNullAndEmpty")) KinesisSettings.add_member(:no_hex_prefix, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "NoHexPrefix")) + KinesisSettings.add_member(:use_large_integer_value, 
Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "UseLargeIntegerValue")) KinesisSettings.struct_class = Types::KinesisSettings Limitation.add_member(:database_id, Shapes::ShapeRef.new(shape: String, location_name: "DatabaseId")) @@ -1758,6 +1770,7 @@ module ClientApi MicrosoftSQLServerSettings.add_member(:trim_space_in_char, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "TrimSpaceInChar")) MicrosoftSQLServerSettings.add_member(:tlog_access_mode, Shapes::ShapeRef.new(shape: TlogAccessMode, location_name: "TlogAccessMode")) MicrosoftSQLServerSettings.add_member(:force_lob_lookup, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "ForceLobLookup")) + MicrosoftSQLServerSettings.add_member(:authentication_method, Shapes::ShapeRef.new(shape: SqlServerAuthenticationMethod, location_name: "AuthenticationMethod")) MicrosoftSQLServerSettings.struct_class = Types::MicrosoftSQLServerSettings MicrosoftSqlServerDataProviderSettings.add_member(:server_name, Shapes::ShapeRef.new(shape: String, location_name: "ServerName")) @@ -1905,7 +1918,7 @@ module ClientApi ModifyReplicationInstanceMessage.add_member(:replication_instance_arn, Shapes::ShapeRef.new(shape: String, required: true, location_name: "ReplicationInstanceArn")) ModifyReplicationInstanceMessage.add_member(:allocated_storage, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "AllocatedStorage")) ModifyReplicationInstanceMessage.add_member(:apply_immediately, Shapes::ShapeRef.new(shape: Boolean, location_name: "ApplyImmediately")) - ModifyReplicationInstanceMessage.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: String, location_name: "ReplicationInstanceClass")) + ModifyReplicationInstanceMessage.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: ReplicationInstanceClass, location_name: "ReplicationInstanceClass")) ModifyReplicationInstanceMessage.add_member(:vpc_security_group_ids, Shapes::ShapeRef.new(shape: VpcSecurityGroupIdList, location_name: "VpcSecurityGroupIds")) ModifyReplicationInstanceMessage.add_member(:preferred_maintenance_window, Shapes::ShapeRef.new(shape: String, location_name: "PreferredMaintenanceWindow")) ModifyReplicationInstanceMessage.add_member(:multi_az, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "MultiAZ")) @@ -1914,6 +1927,7 @@ module ClientApi ModifyReplicationInstanceMessage.add_member(:auto_minor_version_upgrade, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "AutoMinorVersionUpgrade")) ModifyReplicationInstanceMessage.add_member(:replication_instance_identifier, Shapes::ShapeRef.new(shape: String, location_name: "ReplicationInstanceIdentifier")) ModifyReplicationInstanceMessage.add_member(:network_type, Shapes::ShapeRef.new(shape: String, location_name: "NetworkType")) + ModifyReplicationInstanceMessage.add_member(:kerberos_authentication_settings, Shapes::ShapeRef.new(shape: KerberosAuthenticationSettings, location_name: "KerberosAuthenticationSettings")) ModifyReplicationInstanceMessage.struct_class = Types::ModifyReplicationInstanceMessage ModifyReplicationInstanceResponse.add_member(:replication_instance, Shapes::ShapeRef.new(shape: ReplicationInstance, location_name: "ReplicationInstance")) @@ -2063,10 +2077,11 @@ module ClientApi OracleSettings.add_member(:trim_space_in_char, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "TrimSpaceInChar")) OracleSettings.add_member(:convert_timestamp_with_zone_to_utc, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: 
"ConvertTimestampWithZoneToUTC")) OracleSettings.add_member(:open_transaction_window, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "OpenTransactionWindow")) + OracleSettings.add_member(:authentication_method, Shapes::ShapeRef.new(shape: OracleAuthenticationMethod, location_name: "AuthenticationMethod")) OracleSettings.struct_class = Types::OracleSettings OrderableReplicationInstance.add_member(:engine_version, Shapes::ShapeRef.new(shape: String, location_name: "EngineVersion")) - OrderableReplicationInstance.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: String, location_name: "ReplicationInstanceClass")) + OrderableReplicationInstance.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: ReplicationInstanceClass, location_name: "ReplicationInstanceClass")) OrderableReplicationInstance.add_member(:storage_type, Shapes::ShapeRef.new(shape: String, location_name: "StorageType")) OrderableReplicationInstance.add_member(:min_allocated_storage, Shapes::ShapeRef.new(shape: Integer, location_name: "MinAllocatedStorage")) OrderableReplicationInstance.add_member(:max_allocated_storage, Shapes::ShapeRef.new(shape: Integer, location_name: "MaxAllocatedStorage")) @@ -2114,6 +2129,7 @@ module ClientApi PostgreSQLSettings.add_member(:map_long_varchar_as, Shapes::ShapeRef.new(shape: LongVarcharMappingType, location_name: "MapLongVarcharAs")) PostgreSQLSettings.add_member(:database_mode, Shapes::ShapeRef.new(shape: DatabaseMode, location_name: "DatabaseMode")) PostgreSQLSettings.add_member(:babelfish_database_name, Shapes::ShapeRef.new(shape: String, location_name: "BabelfishDatabaseName")) + PostgreSQLSettings.add_member(:disable_unicode_source_filter, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "DisableUnicodeSourceFilter")) PostgreSQLSettings.struct_class = Types::PostgreSQLSettings PostgreSqlDataProviderSettings.add_member(:server_name, Shapes::ShapeRef.new(shape: String, location_name: "ServerName")) @@ -2303,7 +2319,7 @@ module ClientApi ReplicationConfigList.member = Shapes::ShapeRef.new(shape: ReplicationConfig) ReplicationInstance.add_member(:replication_instance_identifier, Shapes::ShapeRef.new(shape: String, location_name: "ReplicationInstanceIdentifier")) - ReplicationInstance.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: String, location_name: "ReplicationInstanceClass")) + ReplicationInstance.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: ReplicationInstanceClass, location_name: "ReplicationInstanceClass")) ReplicationInstance.add_member(:replication_instance_status, Shapes::ShapeRef.new(shape: String, location_name: "ReplicationInstanceStatus")) ReplicationInstance.add_member(:allocated_storage, Shapes::ShapeRef.new(shape: Integer, location_name: "AllocatedStorage")) ReplicationInstance.add_member(:instance_create_time, Shapes::ShapeRef.new(shape: TStamp, location_name: "InstanceCreateTime")) @@ -2327,6 +2343,7 @@ module ClientApi ReplicationInstance.add_member(:free_until, Shapes::ShapeRef.new(shape: TStamp, location_name: "FreeUntil")) ReplicationInstance.add_member(:dns_name_servers, Shapes::ShapeRef.new(shape: String, location_name: "DnsNameServers")) ReplicationInstance.add_member(:network_type, Shapes::ShapeRef.new(shape: String, location_name: "NetworkType")) + ReplicationInstance.add_member(:kerberos_authentication_settings, Shapes::ShapeRef.new(shape: KerberosAuthenticationSettings, location_name: "KerberosAuthenticationSettings")) ReplicationInstance.struct_class = 
Types::ReplicationInstance ReplicationInstanceIpv6AddressList.member = Shapes::ShapeRef.new(shape: String) @@ -2346,7 +2363,7 @@ module ClientApi ReplicationList.member = Shapes::ShapeRef.new(shape: Replication) - ReplicationPendingModifiedValues.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: String, location_name: "ReplicationInstanceClass")) + ReplicationPendingModifiedValues.add_member(:replication_instance_class, Shapes::ShapeRef.new(shape: ReplicationInstanceClass, location_name: "ReplicationInstanceClass")) ReplicationPendingModifiedValues.add_member(:allocated_storage, Shapes::ShapeRef.new(shape: IntegerOptional, location_name: "AllocatedStorage")) ReplicationPendingModifiedValues.add_member(:multi_az, Shapes::ShapeRef.new(shape: BooleanOptional, location_name: "MultiAZ")) ReplicationPendingModifiedValues.add_member(:engine_version, Shapes::ShapeRef.new(shape: String, location_name: "EngineVersion")) @@ -2408,7 +2425,7 @@ module ClientApi ReplicationTaskAssessmentResult.add_member(:assessment_status, Shapes::ShapeRef.new(shape: String, location_name: "AssessmentStatus")) ReplicationTaskAssessmentResult.add_member(:assessment_results_file, Shapes::ShapeRef.new(shape: String, location_name: "AssessmentResultsFile")) ReplicationTaskAssessmentResult.add_member(:assessment_results, Shapes::ShapeRef.new(shape: String, location_name: "AssessmentResults")) - ReplicationTaskAssessmentResult.add_member(:s3_object_url, Shapes::ShapeRef.new(shape: String, location_name: "S3ObjectUrl")) + ReplicationTaskAssessmentResult.add_member(:s3_object_url, Shapes::ShapeRef.new(shape: SecretString, location_name: "S3ObjectUrl")) ReplicationTaskAssessmentResult.struct_class = Types::ReplicationTaskAssessmentResult ReplicationTaskAssessmentResultList.member = Shapes::ShapeRef.new(shape: ReplicationTaskAssessmentResult) @@ -3103,6 +3120,7 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: DeleteEventSubscriptionResponse) o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundFault) o.errors << Shapes::ShapeRef.new(shape: InvalidResourceStateFault) + o.errors << Shapes::ShapeRef.new(shape: AccessDeniedFault) end) api.add_operation(:delete_fleet_advisor_collector, Seahorse::Model::Operation.new.tap do |o| @@ -3181,6 +3199,7 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: DeleteReplicationSubnetGroupResponse) o.errors << Shapes::ShapeRef.new(shape: InvalidResourceStateFault) o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundFault) + o.errors << Shapes::ShapeRef.new(shape: AccessDeniedFault) end) api.add_operation(:delete_replication_task, Seahorse::Model::Operation.new.tap do |o| @@ -3841,6 +3860,7 @@ module ClientApi o.output = Shapes::ShapeRef.new(shape: DescribeTableStatisticsResponse) o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundFault) o.errors << Shapes::ShapeRef.new(shape: InvalidResourceStateFault) + o.errors << Shapes::ShapeRef.new(shape: AccessDeniedFault) o[:pager] = Aws::Pager.new( limit_key: "max_records", tokens: { @@ -3940,6 +3960,7 @@ module ClientApi o.errors << Shapes::ShapeRef.new(shape: KMSInvalidStateFault) o.errors << Shapes::ShapeRef.new(shape: KMSNotFoundFault) o.errors << Shapes::ShapeRef.new(shape: KMSThrottlingFault) + o.errors << Shapes::ShapeRef.new(shape: AccessDeniedFault) end) api.add_operation(:modify_instance_profile, Seahorse::Model::Operation.new.tap do |o| diff --git a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/types.rb 
b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/types.rb index 3aa32342400..f49e6549920 100644 --- a/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/types.rb +++ b/gems/aws-sdk-databasemigrationservice/lib/aws-sdk-databasemigrationservice/types.rb @@ -1627,6 +1627,12 @@ class CreateReplicationConfigResponse < Struct.new( # addressing. IPv6 only is not yet supported. # @return [String] # + # @!attribute [rw] kerberos_authentication_settings + # Specifies the ID of the secret that stores the key cache file + # required for kerberos authentication, when creating a replication + # instance. + # @return [Types::KerberosAuthenticationSettings] + # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CreateReplicationInstanceMessage AWS API Documentation # class CreateReplicationInstanceMessage < Struct.new( @@ -1645,7 +1651,8 @@ class CreateReplicationInstanceMessage < Struct.new( :publicly_accessible, :dns_name_servers, :resource_identifier, - :network_type) + :network_type, + :kerberos_authentication_settings) SENSITIVE = [] include Aws::Structure end @@ -3024,7 +3031,8 @@ class DescribeDataMigrationsResponse < Struct.new( # Filters applied to the data providers described in the form of # key-value pairs. # - # Valid filter names: data-provider-identifier + # Valid filter names and values: data-provider-identifier, data + # provider arn or name # @return [Array] # # @!attribute [rw] max_records @@ -3796,6 +3804,9 @@ class DescribeFleetAdvisorSchemasResponse < Struct.new( # @!attribute [rw] filters # Filters applied to the instance profiles described in the form of # key-value pairs. + # + # Valid filter names and values: instance-profile-identifier, instance + # profile arn or name # @return [Array] # # @!attribute [rw] max_records @@ -4173,6 +4184,14 @@ class DescribeMetadataModelImportsResponse < Struct.new( # @!attribute [rw] filters # Filters applied to the migration projects described in the form of # key-value pairs. + # + # Valid filter names and values: + # + # * instance-profile-identifier, instance profile arn or name + # + # * data-provider-identifier, data provider arn or name + # + # * migration-project-identifier, migration project arn or name # @return [Array] # # @!attribute [rw] max_records @@ -6750,6 +6769,10 @@ class KMSThrottlingFault < Struct.new( # supported in DMS version 3.5.1 and later. # @return [String] # + # @!attribute [rw] use_large_integer_value + # Specifies using the large integer value with Kafka. + # @return [Boolean] + # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/KafkaSettings AWS API Documentation # class KafkaSettings < Struct.new( @@ -6772,11 +6795,40 @@ class KafkaSettings < Struct.new( :sasl_password, :no_hex_prefix, :sasl_mechanism, - :ssl_endpoint_identification_algorithm) + :ssl_endpoint_identification_algorithm, + :use_large_integer_value) SENSITIVE = [:ssl_client_key_password, :sasl_password] include Aws::Structure end + # Specifies using Kerberos authentication settings for use with DMS. + # + # @!attribute [rw] key_cache_secret_id + # Specifies the secret ID of the key cache for the replication + # instance. + # @return [String] + # + # @!attribute [rw] key_cache_secret_iam_arn + # Specifies the Amazon Resource Name (ARN) of the IAM role that grants + # Amazon Web Services DMS access to the secret containing key cache + # file for the replication instance. 
+ # @return [String] + # + # @!attribute [rw] krb_5_file_contents + # Specifies the ID of the secret that stores the key cache file + # required for kerberos authentication of the replication instance. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/KerberosAuthenticationSettings AWS API Documentation + # + class KerberosAuthenticationSettings < Struct.new( + :key_cache_secret_id, + :key_cache_secret_iam_arn, + :krb_5_file_contents) + SENSITIVE = [] + include Aws::Structure + end + # Provides information that describes an Amazon Kinesis Data Stream # endpoint. This information includes the output format of records # applied to the endpoint and details of transaction and control table @@ -6849,6 +6901,10 @@ class KafkaSettings < Struct.new( # type columns without adding the '0x' prefix. # @return [Boolean] # + # @!attribute [rw] use_large_integer_value + # Specifies using the large integer value with Kinesis. + # @return [Boolean] + # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/KinesisSettings AWS API Documentation # class KinesisSettings < Struct.new( @@ -6861,7 +6917,8 @@ class KinesisSettings < Struct.new( :include_table_alter_operations, :include_control_details, :include_null_and_empty, - :no_hex_prefix) + :no_hex_prefix, + :use_large_integer_value) SENSITIVE = [] include Aws::Structure end @@ -7123,6 +7180,10 @@ class MariaDbDataProviderSettings < Struct.new( # Forces LOB lookup on inline LOB. # @return [Boolean] # + # @!attribute [rw] authentication_method + # Specifies using Kerberos authentication with Microsoft SQL Server. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/MicrosoftSQLServerSettings AWS API Documentation # class MicrosoftSQLServerSettings < Struct.new( @@ -7142,7 +7203,8 @@ class MicrosoftSQLServerSettings < Struct.new( :secrets_manager_secret_id, :trim_space_in_char, :tlog_access_mode, - :force_lob_lookup) + :force_lob_lookup, + :authentication_method) SENSITIVE = [:password] include Aws::Structure end @@ -8130,6 +8192,12 @@ class ModifyReplicationConfigResponse < Struct.new( # addressing. IPv6 only is not yet supported. # @return [String] # + # @!attribute [rw] kerberos_authentication_settings + # Specifies the ID of the secret that stores the key cache file + # required for kerberos authentication, when modifying a replication + # instance. + # @return [Types::KerberosAuthenticationSettings] + # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/ModifyReplicationInstanceMessage AWS API Documentation # class ModifyReplicationInstanceMessage < Struct.new( @@ -8144,7 +8212,8 @@ class ModifyReplicationInstanceMessage < Struct.new( :allow_major_version_upgrade, :auto_minor_version_upgrade, :replication_instance_identifier, - :network_type) + :network_type, + :kerberos_authentication_settings) SENSITIVE = [] include Aws::Structure end @@ -9028,8 +9097,8 @@ class OracleDataProviderSettings < Struct.new( # @return [Boolean] # # @!attribute [rw] archived_logs_only - # When this field is set to `Y`, DMS only accesses the archived redo - # logs. If the archived redo logs are stored on Automatic Storage + # When this field is set to `True`, DMS only accesses the archived + # redo logs. If the archived redo logs are stored on Automatic Storage # Management (ASM) only, the DMS user account needs to be granted ASM # privileges. 
# @return [Boolean] @@ -9198,12 +9267,12 @@ class OracleDataProviderSettings < Struct.new( # @return [String] # # @!attribute [rw] use_b_file - # Set this attribute to Y to capture change data using the Binary - # Reader utility. Set `UseLogminerReader` to N to set this attribute - # to Y. To use Binary Reader with Amazon RDS for Oracle as the source, - # you set additional attributes. For more information about using this - # setting with Oracle Automatic Storage Management (ASM), see [ Using - # Oracle LogMiner or DMS Binary Reader for CDC][1]. + # Set this attribute to True to capture change data using the Binary + # Reader utility. Set `UseLogminerReader` to False to set this + # attribute to True. To use Binary Reader with Amazon RDS for Oracle + # as the source, you set additional attributes. For more information + # about using this setting with Oracle Automatic Storage Management + # (ASM), see [ Using Oracle LogMiner or DMS Binary Reader for CDC][1]. # # # @@ -9211,17 +9280,17 @@ class OracleDataProviderSettings < Struct.new( # @return [Boolean] # # @!attribute [rw] use_direct_path_full_load - # Set this attribute to Y to have DMS use a direct path full load. + # Set this attribute to True to have DMS use a direct path full load. # Specify this value to use the direct path protocol in the Oracle # Call Interface (OCI). By using this OCI protocol, you can bulk-load # Oracle target tables during a full load. # @return [Boolean] # # @!attribute [rw] use_logminer_reader - # Set this attribute to Y to capture change data using the Oracle - # LogMiner utility (the default). Set this attribute to N if you want - # to access the redo logs as a binary file. When you set - # `UseLogminerReader` to N, also set `UseBfile` to Y. For more + # Set this attribute to True to capture change data using the Oracle + # LogMiner utility (the default). Set this attribute to False if you + # want to access the redo logs as a binary file. When you set + # `UseLogminerReader` to False, also set `UseBfile` to True. For more # information on this setting and using Oracle ASM, see [ Using Oracle # LogMiner or DMS Binary Reader for CDC][1] in the *DMS User Guide*. # @@ -9312,13 +9381,15 @@ class OracleDataProviderSettings < Struct.new( # You can specify an integer value between 0 (the default) and 240 # (the maximum). # - # This parameter is only valid in DMS version 3.5.0 and later. DMS - # supports a window of up to 9.5 hours including the value for - # `OpenTransactionWindow`. + # This parameter is only valid in DMS version 3.5.0 and later. # # # @return [Integer] # + # @!attribute [rw] authentication_method + # Specifies using Kerberos authentication with Oracle. + # @return [String] + # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/OracleSettings AWS API Documentation # class OracleSettings < Struct.new( @@ -9364,7 +9435,8 @@ class OracleSettings < Struct.new( :secrets_manager_oracle_asm_secret_id, :trim_space_in_char, :convert_timestamp_with_zone_to_utc, - :open_transaction_window) + :open_transaction_window, + :authentication_method) SENSITIVE = [:asm_password, :password, :security_db_encryption] include Aws::Structure end @@ -9517,6 +9589,8 @@ class PendingMaintenanceAction < Struct.new( # PostgreSQL database when the task starts. You can later remove these # artifacts. # + # The default value is `true`. + # # If this value is set to `N`, you don't have to create tables or # triggers on the source database. 
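The `UseBfile` and `UseLogminerReader` attributes reworded earlier in this hunk describe switching Oracle CDC from LogMiner to Binary Reader. A hedged sketch of applying that switch to an existing Oracle source endpoint, with the endpoint ARN as a placeholder:

    require 'aws-sdk-databasemigrationservice'

    dms = Aws::DatabaseMigrationService::Client.new(region: 'us-east-1')

    dms.modify_endpoint(
      endpoint_arn: 'arn:aws:dms:us-east-1:111122223333:endpoint:EXAMPLE',  # placeholder
      oracle_settings: {
        use_logminer_reader: false,  # stop reading redo logs with LogMiner...
        use_b_file: true             # ...and use the Binary Reader utility instead
      }
    )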
# @return [Boolean] @@ -9525,6 +9599,8 @@ class PendingMaintenanceAction < Struct.new( # Specifies the maximum size (in KB) of any .csv file used to transfer # data to PostgreSQL. # + # The default value is 32,768 KB (32 MB). + # # Example: `maxFileSize=512` # @return [Integer] # @@ -9536,6 +9612,8 @@ class PendingMaintenanceAction < Struct.new( # The schema in which the operational DDL database artifacts are # created. # + # The default value is `public`. + # # Example: `ddlArtifactsSchema=xyzddlschema;` # @return [String] # @@ -9550,6 +9628,8 @@ class PendingMaintenanceAction < Struct.new( # When set to `true`, this value causes a task to fail if the actual # size of a LOB column is greater than the specified `LobMaxSize`. # + # The default value is `false`. + # # If task is set to Limited LOB mode and this option is set to true, # the task fails instead of truncating the LOB data. # @return [Boolean] @@ -9560,14 +9640,20 @@ class PendingMaintenanceAction < Struct.new( # slots from holding onto old WAL logs, which can result in storage # full situations on the source. This heartbeat keeps `restart_lsn` # moving and prevents storage full scenarios. + # + # The default value is `false`. # @return [Boolean] # # @!attribute [rw] heartbeat_schema # Sets the schema in which the heartbeat artifacts are created. + # + # The default value is `public`. # @return [String] # # @!attribute [rw] heartbeat_frequency # Sets the WAL heartbeat frequency (in minutes). + # + # The default value is 5 minutes. # @return [Integer] # # @!attribute [rw] password @@ -9625,6 +9711,8 @@ class PendingMaintenanceAction < Struct.new( # # @!attribute [rw] plugin_name # Specifies the plugin to use to create a replication slot. + # + # The default value is `pglogical`. # @return [String] # # @!attribute [rw] secrets_manager_access_role_arn @@ -9669,14 +9757,20 @@ class PendingMaintenanceAction < Struct.new( # default, PostgreSQL migrates booleans as `varchar(5)`. You must set # this setting on both the source and target endpoints for it to take # effect. + # + # The default value is `false`. # @return [Boolean] # # @!attribute [rw] map_jsonb_as_clob # When true, DMS migrates JSONB values as CLOB. + # + # The default value is `false`. # @return [Boolean] # # @!attribute [rw] map_long_varchar_as - # When true, DMS migrates LONG values as VARCHAR. + # Sets what datatype to map LONG values as. + # + # The default value is `wstring`. # @return [String] # # @!attribute [rw] database_mode @@ -9689,6 +9783,17 @@ class PendingMaintenanceAction < Struct.new( # The Babelfish for Aurora PostgreSQL database name for the endpoint. # @return [String] # + # @!attribute [rw] disable_unicode_source_filter + # Disables the Unicode source filter with PostgreSQL, for values + # passed into the Selection rule filter on Source Endpoint column + # values. By default DMS performs source filter comparisons using a + # Unicode string which can cause look ups to ignore the indexes in the + # text columns and slow down migrations. + # + # Unicode support should only be disabled when using a selection rule + # filter is on a text column in the Source database that is indexed. 
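A minimal sketch of how the new disable_unicode_source_filter flag might be set on a PostgreSQL source endpoint via modify_endpoint; the endpoint ARN is a placeholder and `dms` is assumed to be an Aws::DatabaseMigrationService::Client as in the earlier sketch.

    # Assuming `dms` is an Aws::DatabaseMigrationService::Client.
    dms.modify_endpoint(
      endpoint_arn: 'arn:aws:dms:us-east-1:123456789012:endpoint:EXAMPLE', # placeholder ARN
      postgre_sql_settings: {
        # Skip the Unicode conversion during source filtering so lookups can
        # use the index on the filtered text column.
        disable_unicode_source_filter: true
      }
    )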
+ # @return [Boolean] + # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/PostgreSQLSettings AWS API Documentation # class PostgreSQLSettings < Struct.new( @@ -9715,7 +9820,8 @@ class PostgreSQLSettings < Struct.new( :map_jsonb_as_clob, :map_long_varchar_as, :database_mode, - :babelfish_database_name) + :babelfish_database_name, + :disable_unicode_source_filter) SENSITIVE = [:password] include Aws::Structure end @@ -10712,7 +10818,7 @@ class RemoveTagsFromResourceResponse < Aws::EmptyStructure; end # @return [Types::ReplicationStats] # # @!attribute [rw] start_replication_type - # The replication type. + # The type of replication to start. # @return [String] # # @!attribute [rw] cdc_start_time @@ -11033,6 +11139,11 @@ class ReplicationConfig < Struct.new( # addressing. IPv6 only is not yet supported. # @return [String] # + # @!attribute [rw] kerberos_authentication_settings + # Specifies the ID of the secret that stores the key cache file + # required for kerberos authentication, when replicating an instance. + # @return [Types::KerberosAuthenticationSettings] + # # @see http://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/ReplicationInstance AWS API Documentation # class ReplicationInstance < Struct.new( @@ -11060,7 +11171,8 @@ class ReplicationInstance < Struct.new( :secondary_availability_zone, :free_until, :dns_name_servers, - :network_type) + :network_type, + :kerberos_authentication_settings) SENSITIVE = [] include Aws::Structure end @@ -11384,13 +11496,16 @@ class ReplicationSubnetGroupDoesNotCoverEnoughAZs < Struct.new( # The reason the replication task was stopped. This response parameter # can return one of the following values: # - # * `"Stop Reason NORMAL"` + # * `"Stop Reason NORMAL"` – The task completed successfully with no + # additional information returned. # # * `"Stop Reason RECOVERABLE_ERROR"` # # * `"Stop Reason FATAL_ERROR"` # - # * `"Stop Reason FULL_LOAD_ONLY_FINISHED"` + # * `"Stop Reason FULL_LOAD_ONLY_FINISHED"` – The task completed the + # full load phase. DMS applied cached changes if you set + # `StopTaskCachedChangesApplied` to `true`. # # * `"Stop Reason STOPPED_AFTER_FULL_LOAD"` – Full load completed, # with cached changes not applied @@ -11566,7 +11681,7 @@ class ReplicationTaskAssessmentResult < Struct.new( :assessment_results_file, :assessment_results, :s3_object_url) - SENSITIVE = [] + SENSITIVE = [:s3_object_url] include Aws::Structure end @@ -11618,6 +11733,9 @@ class ReplicationTaskAssessmentResult < Struct.new( # # * `"starting"` – The assessment run is starting, but resources are # not yet being provisioned for individual assessments. + # + # * `"warning"` – At least one individual assessment completed with a + # `warning` status. # @return [String] # # @!attribute [rw] replication_task_assessment_run_creation_date @@ -13135,6 +13253,23 @@ class StartRecommendationsRequestEntry < Struct.new( # # @!attribute [rw] start_replication_type # The replication type. + # + # When the replication type is `full-load` or `full-load-and-cdc`, the + # only valid value for the first run of the replication is + # `start-replication`. This option will start the replication. + # + # You can also use ReloadTables to reload specific tables that failed + # during replication instead of restarting the replication. + # + # The `resume-processing` option isn't applicable for a full-load + # replication, because you can't resume partially loaded tables + # during the full load phase. 
+ # + # For a `full-load-and-cdc` replication, DMS migrates table data, and + # then applies data changes that occur on the source. To load all the + # tables again, and start capturing source changes, use + # `reload-target`. Otherwise use `resume-processing`, to replicate the + # changes from the last stop position. # @return [String] # # @!attribute [rw] cdc_start_time diff --git a/gems/aws-sdk-databasemigrationservice/sig/client.rbs b/gems/aws-sdk-databasemigrationservice/sig/client.rbs index 27f273d2e1f..9b05217d43c 100644 --- a/gems/aws-sdk-databasemigrationservice/sig/client.rbs +++ b/gems/aws-sdk-databasemigrationservice/sig/client.rbs @@ -349,7 +349,8 @@ module Aws include_table_alter_operations: bool?, include_control_details: bool?, include_null_and_empty: bool?, - no_hex_prefix: bool? + no_hex_prefix: bool?, + use_large_integer_value: bool? }, ?kafka_settings: { broker: ::String?, @@ -371,7 +372,8 @@ module Aws sasl_password: ::String?, no_hex_prefix: bool?, sasl_mechanism: ("scram-sha-512" | "plain")?, - ssl_endpoint_identification_algorithm: ("none" | "https")? + ssl_endpoint_identification_algorithm: ("none" | "https")?, + use_large_integer_value: bool? }, ?elasticsearch_settings: { service_access_role_arn: ::String, @@ -446,7 +448,8 @@ module Aws map_jsonb_as_clob: bool?, map_long_varchar_as: ("wstring" | "clob" | "nclob")?, database_mode: ("default" | "babelfish")?, - babelfish_database_name: ::String? + babelfish_database_name: ::String?, + disable_unicode_source_filter: bool? }, ?my_sql_settings: { after_connect_script: ::String?, @@ -508,7 +511,8 @@ module Aws secrets_manager_oracle_asm_secret_id: ::String?, trim_space_in_char: bool?, convert_timestamp_with_zone_to_utc: bool?, - open_transaction_window: ::Integer? + open_transaction_window: ::Integer?, + authentication_method: ("password" | "kerberos")? }, ?sybase_settings: { database_name: ::String?, @@ -536,7 +540,8 @@ module Aws secrets_manager_secret_id: ::String?, trim_space_in_char: bool?, tlog_access_mode: ("BackupOnly" | "PreferBackup" | "PreferTlog" | "TlogOnly")?, - force_lob_lookup: bool? + force_lob_lookup: bool?, + authentication_method: ("password" | "kerberos")? }, ?ibm_db_2_settings: { database_name: ::String?, @@ -768,7 +773,12 @@ module Aws ?publicly_accessible: bool, ?dns_name_servers: ::String, ?resource_identifier: ::String, - ?network_type: ::String + ?network_type: ::String, + ?kerberos_authentication_settings: { + key_cache_secret_id: ::String?, + key_cache_secret_iam_arn: ::String?, + krb_5_file_contents: ::String? + } ) -> _CreateReplicationInstanceResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _CreateReplicationInstanceResponseSuccess @@ -1969,7 +1979,8 @@ module Aws include_table_alter_operations: bool?, include_control_details: bool?, include_null_and_empty: bool?, - no_hex_prefix: bool? + no_hex_prefix: bool?, + use_large_integer_value: bool? }, ?kafka_settings: { broker: ::String?, @@ -1991,7 +2002,8 @@ module Aws sasl_password: ::String?, no_hex_prefix: bool?, sasl_mechanism: ("scram-sha-512" | "plain")?, - ssl_endpoint_identification_algorithm: ("none" | "https")? + ssl_endpoint_identification_algorithm: ("none" | "https")?, + use_large_integer_value: bool? }, ?elasticsearch_settings: { service_access_role_arn: ::String, @@ -2066,7 +2078,8 @@ module Aws map_jsonb_as_clob: bool?, map_long_varchar_as: ("wstring" | "clob" | "nclob")?, database_mode: ("default" | "babelfish")?, - babelfish_database_name: ::String? 
+ babelfish_database_name: ::String?, + disable_unicode_source_filter: bool? }, ?my_sql_settings: { after_connect_script: ::String?, @@ -2128,7 +2141,8 @@ module Aws secrets_manager_oracle_asm_secret_id: ::String?, trim_space_in_char: bool?, convert_timestamp_with_zone_to_utc: bool?, - open_transaction_window: ::Integer? + open_transaction_window: ::Integer?, + authentication_method: ("password" | "kerberos")? }, ?sybase_settings: { database_name: ::String?, @@ -2156,7 +2170,8 @@ module Aws secrets_manager_secret_id: ::String?, trim_space_in_char: bool?, tlog_access_mode: ("BackupOnly" | "PreferBackup" | "PreferTlog" | "TlogOnly")?, - force_lob_lookup: bool? + force_lob_lookup: bool?, + authentication_method: ("password" | "kerberos")? }, ?ibm_db_2_settings: { database_name: ::String?, @@ -2334,7 +2349,12 @@ module Aws ?allow_major_version_upgrade: bool, ?auto_minor_version_upgrade: bool, ?replication_instance_identifier: ::String, - ?network_type: ::String + ?network_type: ::String, + ?kerberos_authentication_settings: { + key_cache_secret_id: ::String?, + key_cache_secret_iam_arn: ::String?, + krb_5_file_contents: ::String? + } ) -> _ModifyReplicationInstanceResponseSuccess | (Hash[Symbol, untyped] params, ?Hash[Symbol, untyped] options) -> _ModifyReplicationInstanceResponseSuccess diff --git a/gems/aws-sdk-databasemigrationservice/sig/types.rbs b/gems/aws-sdk-databasemigrationservice/sig/types.rbs index 60f2ba91487..3f62539e2ed 100644 --- a/gems/aws-sdk-databasemigrationservice/sig/types.rbs +++ b/gems/aws-sdk-databasemigrationservice/sig/types.rbs @@ -326,6 +326,7 @@ module Aws::DatabaseMigrationService attr_accessor dns_name_servers: ::String attr_accessor resource_identifier: ::String attr_accessor network_type: ::String + attr_accessor kerberos_authentication_settings: Types::KerberosAuthenticationSettings SENSITIVE: [] end @@ -1582,9 +1583,17 @@ module Aws::DatabaseMigrationService attr_accessor no_hex_prefix: bool attr_accessor sasl_mechanism: ("scram-sha-512" | "plain") attr_accessor ssl_endpoint_identification_algorithm: ("none" | "https") + attr_accessor use_large_integer_value: bool SENSITIVE: [:ssl_client_key_password, :sasl_password] end + class KerberosAuthenticationSettings + attr_accessor key_cache_secret_id: ::String + attr_accessor key_cache_secret_iam_arn: ::String + attr_accessor krb_5_file_contents: ::String + SENSITIVE: [] + end + class KinesisSettings attr_accessor stream_arn: ::String attr_accessor message_format: ("json" | "json-unformatted") @@ -1596,6 +1605,7 @@ module Aws::DatabaseMigrationService attr_accessor include_control_details: bool attr_accessor include_null_and_empty: bool attr_accessor no_hex_prefix: bool + attr_accessor use_large_integer_value: bool SENSITIVE: [] end @@ -1646,6 +1656,7 @@ module Aws::DatabaseMigrationService attr_accessor trim_space_in_char: bool attr_accessor tlog_access_mode: ("BackupOnly" | "PreferBackup" | "PreferTlog" | "TlogOnly") attr_accessor force_lob_lookup: bool + attr_accessor authentication_method: ("password" | "kerberos") SENSITIVE: [:password] end @@ -1838,6 +1849,7 @@ module Aws::DatabaseMigrationService attr_accessor auto_minor_version_upgrade: bool attr_accessor replication_instance_identifier: ::String attr_accessor network_type: ::String + attr_accessor kerberos_authentication_settings: Types::KerberosAuthenticationSettings SENSITIVE: [] end @@ -2015,6 +2027,7 @@ module Aws::DatabaseMigrationService attr_accessor trim_space_in_char: bool attr_accessor convert_timestamp_with_zone_to_utc: bool attr_accessor 
open_transaction_window: ::Integer + attr_accessor authentication_method: ("password" | "kerberos") SENSITIVE: [:asm_password, :password, :security_db_encryption] end @@ -2066,6 +2079,7 @@ module Aws::DatabaseMigrationService attr_accessor map_long_varchar_as: ("wstring" | "clob" | "nclob") attr_accessor database_mode: ("default" | "babelfish") attr_accessor babelfish_database_name: ::String + attr_accessor disable_unicode_source_filter: bool SENSITIVE: [:password] end @@ -2322,6 +2336,7 @@ module Aws::DatabaseMigrationService attr_accessor free_until: ::Time attr_accessor dns_name_servers: ::String attr_accessor network_type: ::String + attr_accessor kerberos_authentication_settings: Types::KerberosAuthenticationSettings SENSITIVE: [] end @@ -2402,7 +2417,7 @@ module Aws::DatabaseMigrationService attr_accessor assessment_results_file: ::String attr_accessor assessment_results: ::String attr_accessor s3_object_url: ::String - SENSITIVE: [] + SENSITIVE: [:s3_object_url] end class ReplicationTaskAssessmentRun diff --git a/gems/aws-sdk-glue/CHANGELOG.md b/gems/aws-sdk-glue/CHANGELOG.md index a2c3683a152..cfdc904e8a3 100644 --- a/gems/aws-sdk-glue/CHANGELOG.md +++ b/gems/aws-sdk-glue/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.205.0 (2024-12-12) +------------------ + +* Feature - To support customer-managed encryption in Data Quality to allow customers encrypt data with their own KMS key, we will add a DataQualityEncryption field to the SecurityConfiguration API where customers can provide their KMS keys. + 1.204.0 (2024-12-03) ------------------ diff --git a/gems/aws-sdk-glue/VERSION b/gems/aws-sdk-glue/VERSION index f197eff1ee7..2492aaba5b2 100644 --- a/gems/aws-sdk-glue/VERSION +++ b/gems/aws-sdk-glue/VERSION @@ -1 +1 @@ -1.204.0 +1.205.0 diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue.rb index a2b491d9c7d..7316c3b3654 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue.rb @@ -54,7 +54,7 @@ module Plugins autoload :EndpointProvider, 'aws-sdk-glue/endpoint_provider' autoload :Endpoints, 'aws-sdk-glue/endpoints' - GEM_VERSION = '1.204.0' + GEM_VERSION = '1.205.0' end diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb index ab9f4b4a0e8..c25191f1d2d 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue/client.rb @@ -4106,45 +4106,45 @@ def create_integration_table_properties(params = {}, options = {}) # Accepts the value Z.2X for Ray jobs. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and provides - # 1 executor per worker. We recommend this worker type for workloads - # such as data transforms, joins, and queries, to offers a scalable - # and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. We + # recommend this worker type for workloads such as data transforms, + # joins, and queries, to offers a scalable and cost effective way to + # run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. 
+ # We recommend this worker type for workloads such as data transforms, + # joins, and queries, to offers a scalable and cost effective way to + # run most jobs. # # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 - # GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available only - # for Glue version 3.0 or later Spark ETL jobs in the following Amazon - # Web Services Regions: US East (Ohio), US East (N. Virginia), US West - # (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia - # Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe - # (Ireland), and Europe (Stockholm). + # GB of memory) with 256GB disk, and provides 1 executor per worker. + # We recommend this worker type for jobs whose workloads contain your + # most demanding transforms, aggregations, joins, and queries. This + # worker type is available only for Glue version 3.0 or later Spark + # ETL jobs in the following Amazon Web Services Regions: US East + # (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific + # (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada + # (Central), Europe (Frankfurt), Europe (Ireland), and Europe + # (Stockholm). # # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 - # GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available only - # for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web - # Services Regions as supported for the `G.4X` worker type. + # GB of memory) with 512GB disk, and provides 1 executor per worker. + # We recommend this worker type for jobs whose workloads contain your + # most demanding transforms, aggregations, joins, and queries. This + # worker type is available only for Glue version 3.0 or later Spark + # ETL jobs, in the same Amazon Web Services Regions as supported for + # the `G.4X` worker type. # # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPUs, - # 4 GB of memory) with 84GB disk (approximately 34GB free), and - # provides 1 executor per worker. We recommend this worker type for - # low volume streaming jobs. This worker type is only available for - # Glue version 3.0 streaming jobs. + # 4 GB of memory) with 84GB disk, and provides 1 executor per worker. + # We recommend this worker type for low volume streaming jobs. This + # worker type is only available for Glue version 3.0 or later + # streaming jobs. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 - # GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # GB of memory) with 128 GB disk, and provides up to 8 Ray workers + # based on the autoscaler. 
# # @option params [Hash] :code_gen_configuration_nodes # The representation of a directed acyclic graph on which both the Glue @@ -4837,6 +4837,10 @@ def create_script(params = {}, options = {}) # job_bookmarks_encryption_mode: "DISABLED", # accepts DISABLED, CSE-KMS # kms_key_arn: "KmsKeyArn", # }, + # data_quality_encryption: { + # data_quality_encryption_mode: "DISABLED", # accepts DISABLED, SSE-KMS + # kms_key_arn: "KmsKeyArn", + # }, # }, # }) # @@ -4899,39 +4903,39 @@ def create_security_configuration(params = {}, options = {}) # the value Z.2X for Ray notebooks. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and provides - # 1 executor per worker. We recommend this worker type for workloads - # such as data transforms, joins, and queries, to offers a scalable - # and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. We + # recommend this worker type for workloads such as data transforms, + # joins, and queries, to offers a scalable and cost effective way to + # run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data transforms, + # joins, and queries, to offers a scalable and cost effective way to + # run most jobs. # # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 - # GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available only - # for Glue version 3.0 or later Spark ETL jobs in the following Amazon - # Web Services Regions: US East (Ohio), US East (N. Virginia), US West - # (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia - # Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe - # (Ireland), and Europe (Stockholm). + # GB of memory) with 256GB disk, and provides 1 executor per worker. + # We recommend this worker type for jobs whose workloads contain your + # most demanding transforms, aggregations, joins, and queries. This + # worker type is available only for Glue version 3.0 or later Spark + # ETL jobs in the following Amazon Web Services Regions: US East + # (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific + # (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada + # (Central), Europe (Frankfurt), Europe (Ireland), and Europe + # (Stockholm). # # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 - # GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available only - # for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web - # Services Regions as supported for the `G.4X` worker type. + # GB of memory) with 512GB disk, and provides 1 executor per worker. 
+ # We recommend this worker type for jobs whose workloads contain your + # most demanding transforms, aggregations, joins, and queries. This + # worker type is available only for Glue version 3.0 or later Spark + # ETL jobs, in the same Amazon Web Services Regions as supported for + # the `G.4X` worker type. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 - # GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # GB of memory) with 128 GB disk, and provides up to 8 Ray workers + # based on the autoscaler. # # @option params [String] :security_configuration # The name of the SecurityConfiguration structure to be used with the @@ -5227,6 +5231,11 @@ def create_table_optimizer(params = {}, options = {}) # Creates a new trigger. # + # Job arguments may be logged. Do not pass plaintext secrets as + # arguments. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to keep them within the Job. + # # @option params [required, String] :name # The name of the trigger. # @@ -5452,6 +5461,11 @@ def create_user_defined_function(params = {}, options = {}) # A collection of properties to be used as part of each execution of the # workflow. # + # Run properties may be logged. Do not pass plaintext secrets as + # properties. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to use them within the workflow run. + # # @option params [Hash] :tags # The tags to be used with this workflow. # @@ -9968,7 +9982,7 @@ def get_job_bookmark(params = {}, options = {}) end # Retrieves the metadata for a given job run. Job run history is - # accessible for 90 days for your workflow and job run. + # accessible for 365 days for your workflow and job run. # # @option params [required, String] :job_name # Name of the job definition being run. @@ -10037,6 +10051,9 @@ def get_job_run(params = {}, options = {}) # Retrieves metadata for all runs of a given job definition. # + # `GetJobRuns` returns the job runs in chronological order, with the + # newest jobs returned first. + # # @option params [required, String] :job_name # The name of the job definition for which to retrieve all job runs. 
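A minimal sketch of paging through the runs of a job with GetJobRuns, which (per the note above) returns runs newest first; the region and job name are placeholders.

    require 'aws-sdk-glue'

    glue = Aws::Glue::Client.new(region: 'us-east-1')

    params = { job_name: 'example-etl-job', max_results: 50 } # placeholder job name
    loop do
      resp = glue.get_job_runs(params)
      resp.job_runs.each do |run|
        puts "#{run.id} #{run.job_run_state} #{run.started_on}"
      end
      break unless resp.next_token
      params[:next_token] = resp.next_token
    end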
# @@ -12320,6 +12337,8 @@ def get_schema_versions_diff(params = {}, options = {}) # resp.security_configuration.encryption_configuration.cloud_watch_encryption.kms_key_arn #=> String # resp.security_configuration.encryption_configuration.job_bookmarks_encryption.job_bookmarks_encryption_mode #=> String, one of "DISABLED", "CSE-KMS" # resp.security_configuration.encryption_configuration.job_bookmarks_encryption.kms_key_arn #=> String + # resp.security_configuration.encryption_configuration.data_quality_encryption.data_quality_encryption_mode #=> String, one of "DISABLED", "SSE-KMS" + # resp.security_configuration.encryption_configuration.data_quality_encryption.kms_key_arn #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfiguration AWS API Documentation # @@ -12364,6 +12383,8 @@ def get_security_configuration(params = {}, options = {}) # resp.security_configurations[0].encryption_configuration.cloud_watch_encryption.kms_key_arn #=> String # resp.security_configurations[0].encryption_configuration.job_bookmarks_encryption.job_bookmarks_encryption_mode #=> String, one of "DISABLED", "CSE-KMS" # resp.security_configurations[0].encryption_configuration.job_bookmarks_encryption.kms_key_arn #=> String + # resp.security_configurations[0].encryption_configuration.data_quality_encryption.data_quality_encryption_mode #=> String, one of "DISABLED", "SSE-KMS" + # resp.security_configurations[0].encryption_configuration.data_quality_encryption.kms_key_arn #=> String # resp.next_token #=> String # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetSecurityConfigurations AWS API Documentation @@ -16281,6 +16302,11 @@ def put_schema_version_metadata(params = {}, options = {}) # @option params [required, Hash] :run_properties # The properties to put for the specified run. # + # Run properties may be logged. Do not pass plaintext secrets as + # properties. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to use them within the workflow run. + # # @return [Struct] Returns an empty {Seahorse::Client::Response response}. # # @example Request syntax with placeholder values @@ -17390,45 +17416,45 @@ def start_import_labels_task_run(params = {}, options = {}) # Accepts the value Z.2X for Ray jobs. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and provides - # 1 executor per worker. We recommend this worker type for workloads - # such as data transforms, joins, and queries, to offers a scalable - # and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. We + # recommend this worker type for workloads such as data transforms, + # joins, and queries, to offers a scalable and cost effective way to + # run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data transforms, + # joins, and queries, to offers a scalable and cost effective way to + # run most jobs. 
# # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, 64 - # GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available only - # for Glue version 3.0 or later Spark ETL jobs in the following Amazon - # Web Services Regions: US East (Ohio), US East (N. Virginia), US West - # (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia - # Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe - # (Ireland), and Europe (Stockholm). + # GB of memory) with 256GB disk, and provides 1 executor per worker. + # We recommend this worker type for jobs whose workloads contain your + # most demanding transforms, aggregations, joins, and queries. This + # worker type is available only for Glue version 3.0 or later Spark + # ETL jobs in the following Amazon Web Services Regions: US East + # (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific + # (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada + # (Central), Europe (Frankfurt), Europe (Ireland), and Europe + # (Stockholm). # # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, 128 - # GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available only - # for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web - # Services Regions as supported for the `G.4X` worker type. + # GB of memory) with 512GB disk, and provides 1 executor per worker. + # We recommend this worker type for jobs whose workloads contain your + # most demanding transforms, aggregations, joins, and queries. This + # worker type is available only for Glue version 3.0 or later Spark + # ETL jobs, in the same Amazon Web Services Regions as supported for + # the `G.4X` worker type. # # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 vCPUs, - # 4 GB of memory) with 84GB disk (approximately 34GB free), and - # provides 1 executor per worker. We recommend this worker type for - # low volume streaming jobs. This worker type is only available for - # Glue version 3.0 streaming jobs. + # 4 GB of memory) with 84GB disk, and provides 1 executor per worker. + # We recommend this worker type for low volume streaming jobs. This + # worker type is only available for Glue version 3.0 or later + # streaming jobs. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, 64 - # GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # GB of memory) with 128 GB disk, and provides up to 8 Ray workers + # based on the autoscaler. # # @option params [Integer] :number_of_workers # The number of workers of a defined `workerType` that are allocated @@ -17609,6 +17635,11 @@ def start_trigger(params = {}, options = {}) # @option params [Hash] :run_properties # The workflow run properties for the new workflow run. # + # Run properties may be logged. Do not pass plaintext secrets as + # properties. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to use them within the workflow run. 
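A minimal sketch of starting a workflow run while keeping secrets out of the (potentially logged) run properties by passing only a reference to a secret; the workflow name and secret name are placeholders, and `glue` is assumed to be an Aws::Glue::Client as in the earlier sketch.

    resp = glue.start_workflow_run(
      name: 'example-nightly-ingest',                              # placeholder workflow name
      run_properties: {
        # Pass a pointer to the secret, not its value, since run properties
        # may end up in logs.
        'db_credentials_secret_id' => 'example/prod/db-credentials' # placeholder Secrets Manager secret name
      }
    )
    puts resp.run_id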
+ # # @return [Types::StartWorkflowRunResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods: # # * {Types::StartWorkflowRunResponse#run_id #run_id} => String @@ -19736,6 +19767,11 @@ def update_table_optimizer(params = {}, options = {}) # Updates a trigger definition. # + # Job arguments may be logged. Do not pass plaintext secrets as + # arguments. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to keep them within the Job. + # # @option params [required, String] :name # The name of the trigger to update. # @@ -19938,6 +19974,11 @@ def update_user_defined_function(params = {}, options = {}) # A collection of properties to be used as part of each execution of the # workflow. # + # Run properties may be logged. Do not pass plaintext secrets as + # properties. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to use them within the workflow run. + # # @option params [Integer] :max_concurrent_runs # You can use this parameter to prevent unwanted multiple updates to # data, to control costs, or in some cases, to prevent exceeding the @@ -19991,7 +20032,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-glue' - context[:gem_version] = '1.204.0' + context[:gem_version] = '1.205.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb index 3b03aaf63d4..5921bc0b700 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue/client_api.rb @@ -389,6 +389,8 @@ module ClientApi DataOperations = Shapes::ListShape.new(name: 'DataOperations') DataQualityAnalyzerResult = Shapes::StructureShape.new(name: 'DataQualityAnalyzerResult') DataQualityAnalyzerResults = Shapes::ListShape.new(name: 'DataQualityAnalyzerResults') + DataQualityEncryption = Shapes::StructureShape.new(name: 'DataQualityEncryption') + DataQualityEncryptionMode = Shapes::StringShape.new(name: 'DataQualityEncryptionMode') DataQualityEvaluationRunAdditionalRunOptions = Shapes::StructureShape.new(name: 'DataQualityEvaluationRunAdditionalRunOptions') DataQualityMetricValues = Shapes::StructureShape.new(name: 'DataQualityMetricValues') DataQualityModelStatus = Shapes::StringShape.new(name: 'DataQualityModelStatus') @@ -3018,6 +3020,10 @@ module ClientApi DataQualityAnalyzerResults.member = Shapes::ShapeRef.new(shape: DataQualityAnalyzerResult) + DataQualityEncryption.add_member(:data_quality_encryption_mode, Shapes::ShapeRef.new(shape: DataQualityEncryptionMode, location_name: "DataQualityEncryptionMode")) + DataQualityEncryption.add_member(:kms_key_arn, Shapes::ShapeRef.new(shape: KmsKeyArn, location_name: "KmsKeyArn")) + DataQualityEncryption.struct_class = Types::DataQualityEncryption + DataQualityEvaluationRunAdditionalRunOptions.add_member(:cloud_watch_metrics_enabled, Shapes::ShapeRef.new(shape: NullableBoolean, location_name: "CloudWatchMetricsEnabled")) DataQualityEvaluationRunAdditionalRunOptions.add_member(:results_s3_prefix, Shapes::ShapeRef.new(shape: UriString, location_name: "ResultsS3Prefix")) DataQualityEvaluationRunAdditionalRunOptions.add_member(:composite_rule_evaluation_method, Shapes::ShapeRef.new(shape: DQCompositeRuleEvaluationMethod, location_name: "CompositeRuleEvaluationMethod")) @@ -3592,6 +3598,7 
@@ module ClientApi EncryptionConfiguration.add_member(:s3_encryption, Shapes::ShapeRef.new(shape: S3EncryptionList, location_name: "S3Encryption")) EncryptionConfiguration.add_member(:cloud_watch_encryption, Shapes::ShapeRef.new(shape: CloudWatchEncryption, location_name: "CloudWatchEncryption")) EncryptionConfiguration.add_member(:job_bookmarks_encryption, Shapes::ShapeRef.new(shape: JobBookmarksEncryption, location_name: "JobBookmarksEncryption")) + EncryptionConfiguration.add_member(:data_quality_encryption, Shapes::ShapeRef.new(shape: DataQualityEncryption, location_name: "DataQualityEncryption")) EncryptionConfiguration.struct_class = Types::EncryptionConfiguration Entity.add_member(:entity_name, Shapes::ShapeRef.new(shape: EntityName, location_name: "EntityName")) diff --git a/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb b/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb index f21a27cd59c..096366e71cf 100644 --- a/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb +++ b/gems/aws-sdk-glue/lib/aws-sdk-glue/types.rb @@ -6000,46 +6000,45 @@ class CreateIntegrationTablePropertiesResponse < Aws::EmptyStructure; end # Accepts the value Z.2X for Ray jobs. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, - # 64 GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs in the following - # Amazon Web Services Regions: US East (Ohio), US East (N. - # Virginia), US West (Oregon), Asia Pacific (Singapore), Asia - # Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe - # (Frankfurt), Europe (Ireland), and Europe (Stockholm). + # 64 GB of memory) with 256GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs in the following Amazon Web Services + # Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), + # Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific + # (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), + # and Europe (Stockholm). 
# # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, - # 128 GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs, in the same - # Amazon Web Services Regions as supported for the `G.4X` worker - # type. + # 128 GB of memory) with 512GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs, in the same Amazon Web Services Regions + # as supported for the `G.4X` worker type. # # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 - # vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), - # and provides 1 executor per worker. We recommend this worker type - # for low volume streaming jobs. This worker type is only available - # for Glue version 3.0 streaming jobs. + # vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per + # worker. We recommend this worker type for low volume streaming + # jobs. This worker type is only available for Glue version 3.0 or + # later streaming jobs. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, - # 64 GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # 64 GB of memory) with 128 GB disk, and provides up to 8 Ray + # workers based on the autoscaler. # @return [String] # # @!attribute [rw] code_gen_configuration_nodes @@ -6754,40 +6753,39 @@ class CreateSecurityConfigurationResponse < Struct.new( # the value Z.2X for Ray notebooks. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, - # 64 GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs in the following - # Amazon Web Services Regions: US East (Ohio), US East (N. 
- # Virginia), US West (Oregon), Asia Pacific (Singapore), Asia - # Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe - # (Frankfurt), Europe (Ireland), and Europe (Stockholm). + # 64 GB of memory) with 256GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs in the following Amazon Web Services + # Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), + # Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific + # (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), + # and Europe (Stockholm). # # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, - # 128 GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs, in the same - # Amazon Web Services Regions as supported for the `G.4X` worker - # type. + # 128 GB of memory) with 512GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs, in the same Amazon Web Services Regions + # as supported for the `G.4X` worker type. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, - # 64 GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # 64 GB of memory) with 128 GB disk, and provides up to 8 Ray + # workers based on the autoscaler. # @return [String] # # @!attribute [rw] security_configuration @@ -7093,6 +7091,11 @@ class CreateUserDefinedFunctionResponse < Aws::EmptyStructure; end # @!attribute [rw] default_run_properties # A collection of properties to be used as part of each execution of # the workflow. + # + # Run properties may be logged. Do not pass plaintext secrets as + # properties. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to use them within the workflow run. # @return [Hash] # # @!attribute [rw] tags @@ -7513,6 +7516,31 @@ class DataQualityAnalyzerResult < Struct.new( include Aws::Structure end + # Specifies how Data Quality assets in your account should be encrypted. + # + # @!attribute [rw] data_quality_encryption_mode + # The encryption mode to use for encrypting Data Quality assets. These + # assets include data quality rulesets, results, statistics, anomaly + # detection models and observations. + # + # Valid values are `SSEKMS` for encryption using a customer-managed + # KMS key, or `DISABLED`. + # @return [String] + # + # @!attribute [rw] kms_key_arn + # The Amazon Resource Name (ARN) of the KMS key to be used to encrypt + # the data. + # @return [String] + # + # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DataQualityEncryption AWS API Documentation + # + class DataQualityEncryption < Struct.new( + :data_quality_encryption_mode, + :kms_key_arn) + SENSITIVE = [] + include Aws::Structure + end + # Additional run options you can specify for an evaluation run. 
# # @!attribute [rw] cloud_watch_metrics_enabled @@ -10104,12 +10132,17 @@ class EncryptionAtRest < Struct.new( # The encryption configuration for job bookmarks. # @return [Types::JobBookmarksEncryption] # + # @!attribute [rw] data_quality_encryption + # The encryption configuration for Glue Data Quality assets. + # @return [Types::DataQualityEncryption] + # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/EncryptionConfiguration AWS API Documentation # class EncryptionConfiguration < Struct.new( :s3_encryption, :cloud_watch_encryption, - :job_bookmarks_encryption) + :job_bookmarks_encryption, + :data_quality_encryption) SENSITIVE = [] include Aws::Structure end @@ -16179,46 +16212,45 @@ class JdbcTarget < Struct.new( # Accepts the value Z.2X for Ray jobs. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, - # 64 GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs in the following - # Amazon Web Services Regions: US East (Ohio), US East (N. - # Virginia), US West (Oregon), Asia Pacific (Singapore), Asia - # Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe - # (Frankfurt), Europe (Ireland), and Europe (Stockholm). + # 64 GB of memory) with 256GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs in the following Amazon Web Services + # Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), + # Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific + # (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), + # and Europe (Stockholm). # # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, - # 128 GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. 
This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs, in the same - # Amazon Web Services Regions as supported for the `G.4X` worker - # type. + # 128 GB of memory) with 512GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs, in the same Amazon Web Services Regions + # as supported for the `G.4X` worker type. # # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 - # vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), - # and provides 1 executor per worker. We recommend this worker type - # for low volume streaming jobs. This worker type is only available - # for Glue version 3.0 streaming jobs. + # vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per + # worker. We recommend this worker type for low volume streaming + # jobs. This worker type is only available for Glue version 3.0 or + # later streaming jobs. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, - # 64 GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # 64 GB of memory) with 128 GB disk, and provides up to 8 Ray + # workers based on the autoscaler. # @return [String] # # @!attribute [rw] number_of_workers @@ -16630,46 +16662,45 @@ class JobNodeDetails < Struct.new( # Accepts the value Z.2X for Ray jobs. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, - # 64 GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs in the following - # Amazon Web Services Regions: US East (Ohio), US East (N. - # Virginia), US West (Oregon), Asia Pacific (Singapore), Asia - # Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe - # (Frankfurt), Europe (Ireland), and Europe (Stockholm). + # 64 GB of memory) with 256GB disk, and provides 1 executor per + # worker. 
We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs in the following Amazon Web Services + # Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), + # Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific + # (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), + # and Europe (Stockholm). # # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, - # 128 GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs, in the same - # Amazon Web Services Regions as supported for the `G.4X` worker - # type. + # 128 GB of memory) with 512GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs, in the same Amazon Web Services Regions + # as supported for the `G.4X` worker type. # # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 - # vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), - # and provides 1 executor per worker. We recommend this worker type - # for low volume streaming jobs. This worker type is only available - # for Glue version 3.0 streaming jobs. + # vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per + # worker. We recommend this worker type for low volume streaming + # jobs. This worker type is only available for Glue version 3.0 or + # later streaming jobs. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, - # 64 GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # 64 GB of memory) with 128 GB disk, and provides up to 8 Ray + # workers based on the autoscaler. # @return [String] # # @!attribute [rw] number_of_workers @@ -16968,46 +16999,45 @@ class JobRun < Struct.new( # Accepts the value Z.2X for Ray jobs. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. 
# # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, - # 64 GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs in the following - # Amazon Web Services Regions: US East (Ohio), US East (N. - # Virginia), US West (Oregon), Asia Pacific (Singapore), Asia - # Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe - # (Frankfurt), Europe (Ireland), and Europe (Stockholm). + # 64 GB of memory) with 256GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs in the following Amazon Web Services + # Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), + # Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific + # (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), + # and Europe (Stockholm). # # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, - # 128 GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs, in the same - # Amazon Web Services Regions as supported for the `G.4X` worker - # type. + # 128 GB of memory) with 512GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs, in the same Amazon Web Services Regions + # as supported for the `G.4X` worker type. # # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 - # vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), - # and provides 1 executor per worker. We recommend this worker type - # for low volume streaming jobs. This worker type is only available - # for Glue version 3.0 streaming jobs. + # vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per + # worker. We recommend this worker type for low volume streaming + # jobs. This worker type is only available for Glue version 3.0 or + # later streaming jobs. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, - # 64 GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # 64 GB of memory) with 128 GB disk, and provides up to 8 Ray + # workers based on the autoscaler. # @return [String] # # @!attribute [rw] number_of_workers @@ -20636,6 +20666,11 @@ class PutSchemaVersionMetadataResponse < Struct.new( # # @!attribute [rw] run_properties # The properties to put for the specified run. + # + # Run properties may be logged. Do not pass plaintext secrets as + # properties. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to use them within the workflow run. 
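The run-properties guidance above is easiest to see as a usage pattern: keep the secret itself out of the properties and store only a reference that is resolved at use time. The Ruby sketch below is illustrative only and is not part of this diff; the workflow name, property key, and secret id are placeholder assumptions.

require 'aws-sdk-glue'
require 'aws-sdk-secretsmanager'

glue = Aws::Glue::Client.new

# Start a run and attach only a pointer to the secret, never its value.
run_id = glue.start_workflow_run(name: 'nightly-etl').run_id
glue.put_workflow_run_properties(
  name: 'nightly-etl',
  run_id: run_id,
  run_properties: { 'db_credentials_secret' => 'prod/etl/db-credentials' }
)

# Later, inside the workflow run, resolve the reference at the point of use.
props  = glue.get_workflow_run_properties(name: 'nightly-etl', run_id: run_id).run_properties
secret = Aws::SecretsManager::Client.new
           .get_secret_value(secret_id: props['db_credentials_secret'])
           .secret_string

Because the property value is only a secret id, nothing sensitive lands in workflow run logs even if run properties are logged.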
# @return [Hash] # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/PutWorkflowRunPropertiesRequest AWS API Documentation @@ -23997,46 +24032,45 @@ class StartImportLabelsTaskRunResponse < Struct.new( # Accepts the value Z.2X for Ray jobs. # # * For the `G.1X` worker type, each worker maps to 1 DPU (4 vCPUs, 16 - # GB of memory) with 84GB disk (approximately 34GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 94GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.2X` worker type, each worker maps to 2 DPU (8 vCPUs, 32 - # GB of memory) with 128GB disk (approximately 77GB free), and - # provides 1 executor per worker. We recommend this worker type for - # workloads such as data transforms, joins, and queries, to offers a - # scalable and cost effective way to run most jobs. + # GB of memory) with 138GB disk, and provides 1 executor per worker. + # We recommend this worker type for workloads such as data + # transforms, joins, and queries, to offers a scalable and cost + # effective way to run most jobs. # # * For the `G.4X` worker type, each worker maps to 4 DPU (16 vCPUs, - # 64 GB of memory) with 256GB disk (approximately 235GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs in the following - # Amazon Web Services Regions: US East (Ohio), US East (N. - # Virginia), US West (Oregon), Asia Pacific (Singapore), Asia - # Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe - # (Frankfurt), Europe (Ireland), and Europe (Stockholm). + # 64 GB of memory) with 256GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs in the following Amazon Web Services + # Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), + # Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific + # (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), + # and Europe (Stockholm). # # * For the `G.8X` worker type, each worker maps to 8 DPU (32 vCPUs, - # 128 GB of memory) with 512GB disk (approximately 487GB free), and - # provides 1 executor per worker. We recommend this worker type for - # jobs whose workloads contain your most demanding transforms, - # aggregations, joins, and queries. This worker type is available - # only for Glue version 3.0 or later Spark ETL jobs, in the same - # Amazon Web Services Regions as supported for the `G.4X` worker - # type. + # 128 GB of memory) with 512GB disk, and provides 1 executor per + # worker. We recommend this worker type for jobs whose workloads + # contain your most demanding transforms, aggregations, joins, and + # queries. This worker type is available only for Glue version 3.0 + # or later Spark ETL jobs, in the same Amazon Web Services Regions + # as supported for the `G.4X` worker type. 
# # * For the `G.025X` worker type, each worker maps to 0.25 DPU (2 - # vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), - # and provides 1 executor per worker. We recommend this worker type - # for low volume streaming jobs. This worker type is only available - # for Glue version 3.0 streaming jobs. + # vCPUs, 4 GB of memory) with 84GB disk, and provides 1 executor per + # worker. We recommend this worker type for low volume streaming + # jobs. This worker type is only available for Glue version 3.0 or + # later streaming jobs. # # * For the `Z.2X` worker type, each worker maps to 2 M-DPU (8vCPUs, - # 64 GB of memory) with 128 GB disk (approximately 120GB free), and - # provides up to 8 Ray workers based on the autoscaler. + # 64 GB of memory) with 128 GB disk, and provides up to 8 Ray + # workers based on the autoscaler. # @return [String] # # @!attribute [rw] number_of_workers @@ -24173,6 +24207,11 @@ class StartTriggerResponse < Struct.new( # # @!attribute [rw] run_properties # The workflow run properties for the new workflow run. + # + # Run properties may be logged. Do not pass plaintext secrets as + # properties. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to use them within the workflow run. # @return [Hash] # # @see http://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StartWorkflowRunRequest AWS API Documentation @@ -27521,6 +27560,11 @@ class UpdateUserDefinedFunctionResponse < Aws::EmptyStructure; end # @!attribute [rw] default_run_properties # A collection of properties to be used as part of each execution of # the workflow. + # + # Run properties may be logged. Do not pass plaintext secrets as + # properties. Retrieve secrets from a Glue Connection, Amazon Web + # Services Secrets Manager or other secret management mechanism if you + # intend to use them within the workflow run. # @return [Hash] # # @!attribute [rw] max_concurrent_runs diff --git a/gems/aws-sdk-glue/sig/client.rbs b/gems/aws-sdk-glue/sig/client.rbs index b7c635ccbe0..0a0ef99f3b4 100644 --- a/gems/aws-sdk-glue/sig/client.rbs +++ b/gems/aws-sdk-glue/sig/client.rbs @@ -2401,6 +2401,10 @@ module Aws job_bookmarks_encryption: { job_bookmarks_encryption_mode: ("DISABLED" | "CSE-KMS")?, kms_key_arn: ::String? + }?, + data_quality_encryption: { + data_quality_encryption_mode: ("DISABLED" | "SSE-KMS")?, + kms_key_arn: ::String? }? 
} ) -> _CreateSecurityConfigurationResponseSuccess diff --git a/gems/aws-sdk-glue/sig/types.rbs b/gems/aws-sdk-glue/sig/types.rbs index 926005cef39..ed47899be35 100644 --- a/gems/aws-sdk-glue/sig/types.rbs +++ b/gems/aws-sdk-glue/sig/types.rbs @@ -1715,6 +1715,12 @@ module Aws::Glue SENSITIVE: [:description, :evaluation_message, :evaluated_metrics] end + class DataQualityEncryption + attr_accessor data_quality_encryption_mode: ("DISABLED" | "SSE-KMS") + attr_accessor kms_key_arn: ::String + SENSITIVE: [] + end + class DataQualityEvaluationRunAdditionalRunOptions attr_accessor cloud_watch_metrics_enabled: bool attr_accessor results_s3_prefix: ::String @@ -2443,6 +2449,7 @@ module Aws::Glue attr_accessor s3_encryption: ::Array[Types::S3Encryption] attr_accessor cloud_watch_encryption: Types::CloudWatchEncryption attr_accessor job_bookmarks_encryption: Types::JobBookmarksEncryption + attr_accessor data_quality_encryption: Types::DataQualityEncryption SENSITIVE: [] end diff --git a/gems/aws-sdk-guardduty/CHANGELOG.md b/gems/aws-sdk-guardduty/CHANGELOG.md index cb1ea0a05c8..9f6b0d29ea2 100644 --- a/gems/aws-sdk-guardduty/CHANGELOG.md +++ b/gems/aws-sdk-guardduty/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.109.0 (2024-12-12) +------------------ + +* Feature - Improved descriptions for certain APIs. + 1.108.0 (2024-12-02) ------------------ diff --git a/gems/aws-sdk-guardduty/VERSION b/gems/aws-sdk-guardduty/VERSION index 83033d17f98..938d55c5464 100644 --- a/gems/aws-sdk-guardduty/VERSION +++ b/gems/aws-sdk-guardduty/VERSION @@ -1 +1 @@ -1.108.0 +1.109.0 diff --git a/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty.rb b/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty.rb index d775704799b..d79880a8b57 100644 --- a/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty.rb +++ b/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty.rb @@ -54,7 +54,7 @@ module Plugins autoload :EndpointProvider, 'aws-sdk-guardduty/endpoint_provider' autoload :Endpoints, 'aws-sdk-guardduty/endpoints' - GEM_VERSION = '1.108.0' + GEM_VERSION = '1.109.0' end diff --git a/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty/client.rb b/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty/client.rb index 34af22ed4b7..4fe46fd039b 100644 --- a/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty/client.rb +++ b/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty/client.rb @@ -740,9 +740,11 @@ def create_detector(params = {}, options = {}) # # * **Medium**: `["4", "5", "6"]` # - # * **High**: `["7", "8", "9"]` - # For more information, see [Severity levels for GuardDuty - # findings][2]. + # * **High**: `["7", "8"]` + # + # * **Critical**: `["9", "10"]` + # For more information, see [Findings severity levels][2] in the + # *Amazon GuardDuty User Guide*. # # * type # @@ -933,7 +935,7 @@ def create_detector(params = {}, options = {}) # # # [1]: https://docs.aws.amazon.com/guardduty/latest/APIReference/API_FindingCriteria.html - # [2]: https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity + # [2]: https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings-severity.html # # @option params [String] :client_token # The idempotency token for the create request. @@ -5328,8 +5330,10 @@ def update_member_detectors(params = {}, options = {}) # [1]: https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html # # @option params [Boolean] :auto_enable - # Represents whether or not to automatically enable member accounts in - # the organization. 
+ # Represents whether to automatically enable member accounts in the + # organization. This applies to only new member accounts, not the + # existing member accounts. When a new account joins the organization, + # the chosen features will be enabled for them by default. # # Even though this is still supported, we recommend using # `AutoEnableOrganizationMembers` to achieve the similar results. You @@ -5530,7 +5534,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-guardduty' - context[:gem_version] = '1.108.0' + context[:gem_version] = '1.109.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty/types.rb b/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty/types.rb index 536d42802e3..b911feccd2d 100644 --- a/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty/types.rb +++ b/gems/aws-sdk-guardduty/lib/aws-sdk-guardduty/types.rb @@ -1367,9 +1367,11 @@ class CreateDetectorResponse < Struct.new( # # * **Medium**: `["4", "5", "6"]` # - # * **High**: `["7", "8", "9"]` - # For more information, see [Severity levels for GuardDuty - # findings][2]. + # * **High**: `["7", "8"]` + # + # * **Critical**: `["9", "10"]` + # For more information, see [Findings severity levels][2] in the + # *Amazon GuardDuty User Guide*. # # * type # @@ -1560,7 +1562,7 @@ class CreateDetectorResponse < Struct.new( # # # [1]: https://docs.aws.amazon.com/guardduty/latest/APIReference/API_FindingCriteria.html - # [2]: https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings.html#guardduty_findings-severity + # [2]: https://docs.aws.amazon.com/guardduty/latest/ug/guardduty_findings-severity.html # @return [Types::FindingCriteria] # # @!attribute [rw] client_token @@ -2441,7 +2443,8 @@ class DescribeMalwareScansRequest < Struct.new( end # @!attribute [rw] scans - # Contains information about malware scans. + # Contains information about malware scans associated with GuardDuty + # Malware Protection for EC2. # @return [Array] # # @!attribute [rw] next_token @@ -6595,9 +6598,13 @@ class Organization < Struct.new( # A list of additional configurations which will be configured for the # organization. # + # Additional configuration applies to only GuardDuty Runtime Monitoring + # protection plan. + # # @!attribute [rw] name # The name of the additional configuration that will be configured for - # the organization. + # the organization. These values are applicable to only Runtime + # Monitoring protection plan. # @return [String] # # @!attribute [rw] auto_enable @@ -6636,7 +6643,8 @@ class OrganizationAdditionalConfiguration < Struct.new( # # @!attribute [rw] name # The name of the additional configuration that is configured for the - # member accounts within the organization. + # member accounts within the organization. These values are applicable + # to only Runtime Monitoring protection plan. # @return [String] # # @!attribute [rw] auto_enable @@ -8249,10 +8257,11 @@ class S3ObjectDetail < Struct.new( include Aws::Structure end - # Contains information about a malware scan. + # Contains information about malware scans associated with GuardDuty + # Malware Protection for EC2. # # @!attribute [rw] detector_id - # The unique ID of the detector that the request is associated with. + # The unique ID of the detector that is associated with the request. # # To find the `detectorId` in the current Region, see the Settings # page in the GuardDuty console, or run the [ListDetectors][1] API. 
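The revised severity bands documented above (High is now `["7", "8"]` and Critical is `["9", "10"]`) map directly onto the filter criteria accepted by `CreateFilter`. A minimal Ruby sketch, assuming a placeholder detector id and filter name that are not taken from this diff:

require 'aws-sdk-guardduty'

guardduty = Aws::GuardDuty::Client.new

# Match only High and Critical findings under the updated severity bands.
guardduty.create_filter(
  detector_id: 'example-detector-id',
  name: 'high-and-critical-findings',
  action: 'NOOP',
  finding_criteria: {
    criterion: {
      'severity' => { equals: %w[7 8 9 10] }
    }
  }
)

The criterion also accepts range conditions such as `greater_than_or_equal` where a band is easier to express than an explicit value list.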
@@ -8284,7 +8293,7 @@ class S3ObjectDetail < Struct.new( # @return [String] # # @!attribute [rw] failure_reason - # Represents the reason for FAILED scan status. + # Represents the reason for `FAILED` scan status. # @return [String] # # @!attribute [rw] scan_start_time @@ -9663,8 +9672,10 @@ class UpdateMemberDetectorsResponse < Struct.new( # @return [String] # # @!attribute [rw] auto_enable - # Represents whether or not to automatically enable member accounts in - # the organization. + # Represents whether to automatically enable member accounts in the + # organization. This applies to only new member accounts, not the + # existing member accounts. When a new account joins the organization, + # the chosen features will be enabled for them by default. # # Even though this is still supported, we recommend using # `AutoEnableOrganizationMembers` to achieve the similar results. You diff --git a/gems/aws-sdk-route53domains/CHANGELOG.md b/gems/aws-sdk-route53domains/CHANGELOG.md index 73ed98afcca..22e38eb40c3 100644 --- a/gems/aws-sdk-route53domains/CHANGELOG.md +++ b/gems/aws-sdk-route53domains/CHANGELOG.md @@ -1,6 +1,11 @@ Unreleased Changes ------------------ +1.72.0 (2024-12-12) +------------------ + +* Feature - This release includes the following API updates: added the enumeration type RESTORE_DOMAIN to the OperationType; constrained the Price attribute to non-negative values; updated the LangCode to allow 2 or 3 alphabetical characters. + 1.71.0 (2024-11-18) ------------------ diff --git a/gems/aws-sdk-route53domains/VERSION b/gems/aws-sdk-route53domains/VERSION index df484cbb1d9..0834888f558 100644 --- a/gems/aws-sdk-route53domains/VERSION +++ b/gems/aws-sdk-route53domains/VERSION @@ -1 +1 @@ -1.71.0 +1.72.0 diff --git a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains.rb b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains.rb index e352e22edd1..fb96e431c3f 100644 --- a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains.rb +++ b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains.rb @@ -54,7 +54,7 @@ module Plugins autoload :EndpointProvider, 'aws-sdk-route53domains/endpoint_provider' autoload :Endpoints, 'aws-sdk-route53domains/endpoints' - GEM_VERSION = '1.71.0' + GEM_VERSION = '1.72.0' end diff --git a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client.rb b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client.rb index 9f44d2f56bc..0b1eb28f8bf 100644 --- a/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client.rb +++ b/gems/aws-sdk-route53domains/lib/aws-sdk-route53domains/client.rb @@ -1259,7 +1259,7 @@ def get_domain_suggestions(params = {}, options = {}) # resp.status #=> String, one of "SUBMITTED", "IN_PROGRESS", "ERROR", "SUCCESSFUL", "FAILED" # resp.message #=> String # resp.domain_name #=> String - # resp.type #=> String, one of "REGISTER_DOMAIN", "DELETE_DOMAIN", "TRANSFER_IN_DOMAIN", "UPDATE_DOMAIN_CONTACT", "UPDATE_NAMESERVER", "CHANGE_PRIVACY_PROTECTION", "DOMAIN_LOCK", "ENABLE_AUTORENEW", "DISABLE_AUTORENEW", "ADD_DNSSEC", "REMOVE_DNSSEC", "EXPIRE_DOMAIN", "TRANSFER_OUT_DOMAIN", "CHANGE_DOMAIN_OWNER", "RENEW_DOMAIN", "PUSH_DOMAIN", "INTERNAL_TRANSFER_OUT_DOMAIN", "INTERNAL_TRANSFER_IN_DOMAIN", "RELEASE_TO_GANDI", "TRANSFER_ON_RENEW" + # resp.type #=> String, one of "REGISTER_DOMAIN", "DELETE_DOMAIN", "TRANSFER_IN_DOMAIN", "UPDATE_DOMAIN_CONTACT", "UPDATE_NAMESERVER", "CHANGE_PRIVACY_PROTECTION", "DOMAIN_LOCK", "ENABLE_AUTORENEW", "DISABLE_AUTORENEW", "ADD_DNSSEC", "REMOVE_DNSSEC", "EXPIRE_DOMAIN", "TRANSFER_OUT_DOMAIN", 
"CHANGE_DOMAIN_OWNER", "RENEW_DOMAIN", "PUSH_DOMAIN", "INTERNAL_TRANSFER_OUT_DOMAIN", "INTERNAL_TRANSFER_IN_DOMAIN", "RELEASE_TO_GANDI", "TRANSFER_ON_RENEW", "RESTORE_DOMAIN" # resp.submitted_date #=> Time # resp.last_updated_date #=> Time # resp.status_flag #=> String, one of "PENDING_ACCEPTANCE", "PENDING_CUSTOMER_ACTION", "PENDING_AUTHORIZATION", "PENDING_PAYMENT_VERIFICATION", "PENDING_SUPPORT_CASE" @@ -1397,7 +1397,7 @@ def list_domains(params = {}, options = {}) # marker: "PageMarker", # max_items: 1, # status: ["SUBMITTED"], # accepts SUBMITTED, IN_PROGRESS, ERROR, SUCCESSFUL, FAILED - # type: ["REGISTER_DOMAIN"], # accepts REGISTER_DOMAIN, DELETE_DOMAIN, TRANSFER_IN_DOMAIN, UPDATE_DOMAIN_CONTACT, UPDATE_NAMESERVER, CHANGE_PRIVACY_PROTECTION, DOMAIN_LOCK, ENABLE_AUTORENEW, DISABLE_AUTORENEW, ADD_DNSSEC, REMOVE_DNSSEC, EXPIRE_DOMAIN, TRANSFER_OUT_DOMAIN, CHANGE_DOMAIN_OWNER, RENEW_DOMAIN, PUSH_DOMAIN, INTERNAL_TRANSFER_OUT_DOMAIN, INTERNAL_TRANSFER_IN_DOMAIN, RELEASE_TO_GANDI, TRANSFER_ON_RENEW + # type: ["REGISTER_DOMAIN"], # accepts REGISTER_DOMAIN, DELETE_DOMAIN, TRANSFER_IN_DOMAIN, UPDATE_DOMAIN_CONTACT, UPDATE_NAMESERVER, CHANGE_PRIVACY_PROTECTION, DOMAIN_LOCK, ENABLE_AUTORENEW, DISABLE_AUTORENEW, ADD_DNSSEC, REMOVE_DNSSEC, EXPIRE_DOMAIN, TRANSFER_OUT_DOMAIN, CHANGE_DOMAIN_OWNER, RENEW_DOMAIN, PUSH_DOMAIN, INTERNAL_TRANSFER_OUT_DOMAIN, INTERNAL_TRANSFER_IN_DOMAIN, RELEASE_TO_GANDI, TRANSFER_ON_RENEW, RESTORE_DOMAIN # sort_by: "SubmittedDate", # accepts SubmittedDate # sort_order: "ASC", # accepts ASC, DESC # }) @@ -1407,7 +1407,7 @@ def list_domains(params = {}, options = {}) # resp.operations #=> Array # resp.operations[0].operation_id #=> String # resp.operations[0].status #=> String, one of "SUBMITTED", "IN_PROGRESS", "ERROR", "SUCCESSFUL", "FAILED" - # resp.operations[0].type #=> String, one of "REGISTER_DOMAIN", "DELETE_DOMAIN", "TRANSFER_IN_DOMAIN", "UPDATE_DOMAIN_CONTACT", "UPDATE_NAMESERVER", "CHANGE_PRIVACY_PROTECTION", "DOMAIN_LOCK", "ENABLE_AUTORENEW", "DISABLE_AUTORENEW", "ADD_DNSSEC", "REMOVE_DNSSEC", "EXPIRE_DOMAIN", "TRANSFER_OUT_DOMAIN", "CHANGE_DOMAIN_OWNER", "RENEW_DOMAIN", "PUSH_DOMAIN", "INTERNAL_TRANSFER_OUT_DOMAIN", "INTERNAL_TRANSFER_IN_DOMAIN", "RELEASE_TO_GANDI", "TRANSFER_ON_RENEW" + # resp.operations[0].type #=> String, one of "REGISTER_DOMAIN", "DELETE_DOMAIN", "TRANSFER_IN_DOMAIN", "UPDATE_DOMAIN_CONTACT", "UPDATE_NAMESERVER", "CHANGE_PRIVACY_PROTECTION", "DOMAIN_LOCK", "ENABLE_AUTORENEW", "DISABLE_AUTORENEW", "ADD_DNSSEC", "REMOVE_DNSSEC", "EXPIRE_DOMAIN", "TRANSFER_OUT_DOMAIN", "CHANGE_DOMAIN_OWNER", "RENEW_DOMAIN", "PUSH_DOMAIN", "INTERNAL_TRANSFER_OUT_DOMAIN", "INTERNAL_TRANSFER_IN_DOMAIN", "RELEASE_TO_GANDI", "TRANSFER_ON_RENEW", "RESTORE_DOMAIN" # resp.operations[0].submitted_date #=> Time # resp.operations[0].domain_name #=> String # resp.operations[0].message #=> String @@ -2799,7 +2799,7 @@ def update_tags_for_domain(params = {}, options = {}) # resp.next_page_marker #=> String # resp.billing_records #=> Array # resp.billing_records[0].domain_name #=> String - # resp.billing_records[0].operation #=> String, one of "REGISTER_DOMAIN", "DELETE_DOMAIN", "TRANSFER_IN_DOMAIN", "UPDATE_DOMAIN_CONTACT", "UPDATE_NAMESERVER", "CHANGE_PRIVACY_PROTECTION", "DOMAIN_LOCK", "ENABLE_AUTORENEW", "DISABLE_AUTORENEW", "ADD_DNSSEC", "REMOVE_DNSSEC", "EXPIRE_DOMAIN", "TRANSFER_OUT_DOMAIN", "CHANGE_DOMAIN_OWNER", "RENEW_DOMAIN", "PUSH_DOMAIN", "INTERNAL_TRANSFER_OUT_DOMAIN", "INTERNAL_TRANSFER_IN_DOMAIN", "RELEASE_TO_GANDI", "TRANSFER_ON_RENEW" + # 
resp.billing_records[0].operation #=> String, one of "REGISTER_DOMAIN", "DELETE_DOMAIN", "TRANSFER_IN_DOMAIN", "UPDATE_DOMAIN_CONTACT", "UPDATE_NAMESERVER", "CHANGE_PRIVACY_PROTECTION", "DOMAIN_LOCK", "ENABLE_AUTORENEW", "DISABLE_AUTORENEW", "ADD_DNSSEC", "REMOVE_DNSSEC", "EXPIRE_DOMAIN", "TRANSFER_OUT_DOMAIN", "CHANGE_DOMAIN_OWNER", "RENEW_DOMAIN", "PUSH_DOMAIN", "INTERNAL_TRANSFER_OUT_DOMAIN", "INTERNAL_TRANSFER_IN_DOMAIN", "RELEASE_TO_GANDI", "TRANSFER_ON_RENEW", "RESTORE_DOMAIN" # resp.billing_records[0].invoice_id #=> String # resp.billing_records[0].bill_date #=> Time # resp.billing_records[0].price #=> Float @@ -2831,7 +2831,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-route53domains' - context[:gem_version] = '1.71.0' + context[:gem_version] = '1.72.0' Seahorse::Client::Request.new(handlers, context) end diff --git a/gems/aws-sdk-route53domains/sig/client.rbs b/gems/aws-sdk-route53domains/sig/client.rbs index 647d7835975..c1ca7e74fdb 100644 --- a/gems/aws-sdk-route53domains/sig/client.rbs +++ b/gems/aws-sdk-route53domains/sig/client.rbs @@ -266,7 +266,7 @@ module Aws def status: () -> ("SUBMITTED" | "IN_PROGRESS" | "ERROR" | "SUCCESSFUL" | "FAILED") def message: () -> ::String def domain_name: () -> ::String - def type: () -> ("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW") + def type: () -> ("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW" | "RESTORE_DOMAIN") def submitted_date: () -> ::Time def last_updated_date: () -> ::Time def status_flag: () -> ("PENDING_ACCEPTANCE" | "PENDING_CUSTOMER_ACTION" | "PENDING_AUTHORIZATION" | "PENDING_PAYMENT_VERIFICATION" | "PENDING_SUPPORT_CASE") @@ -311,7 +311,7 @@ module Aws ?marker: ::String, ?max_items: ::Integer, ?status: Array[("SUBMITTED" | "IN_PROGRESS" | "ERROR" | "SUCCESSFUL" | "FAILED")], - ?type: Array[("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW")], + ?type: Array[("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW" | "RESTORE_DOMAIN")], ?sort_by: 
("SubmittedDate"), ?sort_order: ("ASC" | "DESC") ) -> _ListOperationsResponseSuccess diff --git a/gems/aws-sdk-route53domains/sig/types.rbs b/gems/aws-sdk-route53domains/sig/types.rbs index 8659fe2ee82..af69ad6022c 100644 --- a/gems/aws-sdk-route53domains/sig/types.rbs +++ b/gems/aws-sdk-route53domains/sig/types.rbs @@ -32,7 +32,7 @@ module Aws::Route53Domains class BillingRecord attr_accessor domain_name: ::String - attr_accessor operation: ("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW") + attr_accessor operation: ("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW" | "RESTORE_DOMAIN") attr_accessor invoice_id: ::String attr_accessor bill_date: ::Time attr_accessor price: ::Float @@ -304,7 +304,7 @@ module Aws::Route53Domains attr_accessor status: ("SUBMITTED" | "IN_PROGRESS" | "ERROR" | "SUCCESSFUL" | "FAILED") attr_accessor message: ::String attr_accessor domain_name: ::String - attr_accessor type: ("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW") + attr_accessor type: ("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW" | "RESTORE_DOMAIN") attr_accessor submitted_date: ::Time attr_accessor last_updated_date: ::Time attr_accessor status_flag: ("PENDING_ACCEPTANCE" | "PENDING_CUSTOMER_ACTION" | "PENDING_AUTHORIZATION" | "PENDING_PAYMENT_VERIFICATION" | "PENDING_SUPPORT_CASE") @@ -335,7 +335,7 @@ module Aws::Route53Domains attr_accessor marker: ::String attr_accessor max_items: ::Integer attr_accessor status: ::Array[("SUBMITTED" | "IN_PROGRESS" | "ERROR" | "SUCCESSFUL" | "FAILED")] - attr_accessor type: ::Array[("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW")] + attr_accessor type: 
::Array[("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW" | "RESTORE_DOMAIN")] attr_accessor sort_by: ("SubmittedDate") attr_accessor sort_order: ("ASC" | "DESC") SENSITIVE: [] @@ -384,7 +384,7 @@ module Aws::Route53Domains class OperationSummary attr_accessor operation_id: ::String attr_accessor status: ("SUBMITTED" | "IN_PROGRESS" | "ERROR" | "SUCCESSFUL" | "FAILED") - attr_accessor type: ("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW") + attr_accessor type: ("REGISTER_DOMAIN" | "DELETE_DOMAIN" | "TRANSFER_IN_DOMAIN" | "UPDATE_DOMAIN_CONTACT" | "UPDATE_NAMESERVER" | "CHANGE_PRIVACY_PROTECTION" | "DOMAIN_LOCK" | "ENABLE_AUTORENEW" | "DISABLE_AUTORENEW" | "ADD_DNSSEC" | "REMOVE_DNSSEC" | "EXPIRE_DOMAIN" | "TRANSFER_OUT_DOMAIN" | "CHANGE_DOMAIN_OWNER" | "RENEW_DOMAIN" | "PUSH_DOMAIN" | "INTERNAL_TRANSFER_OUT_DOMAIN" | "INTERNAL_TRANSFER_IN_DOMAIN" | "RELEASE_TO_GANDI" | "TRANSFER_ON_RENEW" | "RESTORE_DOMAIN") attr_accessor submitted_date: ::Time attr_accessor domain_name: ::String attr_accessor message: ::String diff --git a/gems/aws-sdk-s3/CHANGELOG.md b/gems/aws-sdk-s3/CHANGELOG.md index f14803f5eb7..78ec7835769 100644 --- a/gems/aws-sdk-s3/CHANGELOG.md +++ b/gems/aws-sdk-s3/CHANGELOG.md @@ -1,6 +1,9 @@ Unreleased Changes ------------------ +1.176.1 (2024-12-12) +------------------ + * Issue - Do not normalize object keys when calling `presigned_url` or `presigned_request`. 1.176.0 (2024-12-03) diff --git a/gems/aws-sdk-s3/VERSION b/gems/aws-sdk-s3/VERSION index ede06c0044c..1d4d8608a92 100644 --- a/gems/aws-sdk-s3/VERSION +++ b/gems/aws-sdk-s3/VERSION @@ -1 +1 @@ -1.176.0 +1.176.1 diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3.rb index 4cf032435e5..8b137f0a045 100644 --- a/gems/aws-sdk-s3/lib/aws-sdk-s3.rb +++ b/gems/aws-sdk-s3/lib/aws-sdk-s3.rb @@ -75,7 +75,7 @@ module Plugins autoload :ObjectVersion, 'aws-sdk-s3/object_version' autoload :EventStreams, 'aws-sdk-s3/event_streams' - GEM_VERSION = '1.176.0' + GEM_VERSION = '1.176.1' end diff --git a/gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb b/gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb index bfad9bacb86..8701e151cc4 100644 --- a/gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb +++ b/gems/aws-sdk-s3/lib/aws-sdk-s3/client.rb @@ -20471,7 +20471,7 @@ def build_request(operation_name, params = {}) tracer: tracer ) context[:gem_name] = 'aws-sdk-s3' - context[:gem_version] = '1.176.0' + context[:gem_version] = '1.176.1' Seahorse::Client::Request.new(handlers, context) end