diff --git a/.changelog/01e55ddb1e8e408ba9a015b11759f931.json b/.changelog/01e55ddb1e8e408ba9a015b11759f931.json new file mode 100644 index 00000000000..519eb7a4cc7 --- /dev/null +++ b/.changelog/01e55ddb1e8e408ba9a015b11759f931.json @@ -0,0 +1,8 @@ +{ + "id": "01e55ddb-1e8e-408b-a9a0-15b11759f931", + "type": "feature", + "description": "Amazon Connect introduces StartOutboundChatContact API allowing customers to initiate outbound chat contacts", + "modules": [ + "service/connect" + ] +} \ No newline at end of file diff --git a/.changelog/20ee572b47c14badaf0f3f1fc6b2ad79.json b/.changelog/20ee572b47c14badaf0f3f1fc6b2ad79.json new file mode 100644 index 00000000000..85506966021 --- /dev/null +++ b/.changelog/20ee572b47c14badaf0f3f1fc6b2ad79.json @@ -0,0 +1,8 @@ +{ + "id": "20ee572b-47c1-4bad-af0f-3f1fc6b2ad79", + "type": "documentation", + "description": "Add examples for API operations in model.", + "modules": [ + "service/verifiedpermissions" + ] +} \ No newline at end of file diff --git a/.changelog/30c54c5c2f2542acb9091bc8707bef09.json b/.changelog/30c54c5c2f2542acb9091bc8707bef09.json new file mode 100644 index 00000000000..2ad49c5f6ff --- /dev/null +++ b/.changelog/30c54c5c2f2542acb9091bc8707bef09.json @@ -0,0 +1,8 @@ +{ + "id": "30c54c5c-2f25-42ac-b909-1bc8707bef09", + "type": "feature", + "description": "Add support for custom models via provisioned throughput for Bedrock Model Evaluation", + "modules": [ + "service/bedrock" + ] +} \ No newline at end of file diff --git a/.changelog/3547471e139e43bbad6a93c1b582849d.json b/.changelog/3547471e139e43bbad6a93c1b582849d.json new file mode 100644 index 00000000000..92ff85245f5 --- /dev/null +++ b/.changelog/3547471e139e43bbad6a93c1b582849d.json @@ -0,0 +1,8 @@ +{ + "id": "3547471e-139e-43bb-ad6a-93c1b582849d", + "type": "feature", + "description": "Release DataLakeDataset, DataIntegrationFlow and ResourceTagging APIs for AWS Supply Chain", + "modules": [ + "service/supplychain" + ] +} \ No newline at end of file 
diff --git a/.changelog/55f62c65c8ab4f20b09ba825d8eafbf3.json b/.changelog/55f62c65c8ab4f20b09ba825d8eafbf3.json new file mode 100644 index 00000000000..e25d0da949c --- /dev/null +++ b/.changelog/55f62c65c8ab4f20b09ba825d8eafbf3.json @@ -0,0 +1,8 @@ +{ + "id": "55f62c65-c8ab-4f20-b09b-a825d8eafbf3", + "type": "documentation", + "description": "Add examples for API operations in model.", + "modules": [ + "service/clouddirectory" + ] +} \ No newline at end of file diff --git a/.changelog/a619d080c6b34c76b81c823557911b5c.json b/.changelog/a619d080c6b34c76b81c823557911b5c.json new file mode 100644 index 00000000000..75ce195699e --- /dev/null +++ b/.changelog/a619d080c6b34c76b81c823557911b5c.json @@ -0,0 +1,8 @@ +{ + "id": "a619d080-c6b3-4c76-b81c-823557911b5c", + "type": "feature", + "description": "This update includes new APIs to support application groups and to allow users to manage resource tag-sync tasks in applications.", + "modules": [ + "service/resourcegroups" + ] +} \ No newline at end of file diff --git a/.changelog/b5c9eb00d40243279059c6f6acab4a8f.json b/.changelog/b5c9eb00d40243279059c6f6acab4a8f.json new file mode 100644 index 00000000000..d543755311f --- /dev/null +++ b/.changelog/b5c9eb00d40243279059c6f6acab4a8f.json @@ -0,0 +1,8 @@ +{ + "id": "b5c9eb00-d402-4327-9059-c6f6acab4a8f", + "type": "feature", + "description": "Timestream for InfluxDB now supports port configuration and additional customer-modifiable InfluxDB v2 parameters. 
This release adds Port to the CreateDbInstance and UpdateDbInstance API, and additional InfluxDB v2 parameters to the CreateDbParameterGroup API.", + "modules": [ + "service/timestreaminfluxdb" + ] +} \ No newline at end of file diff --git a/.changelog/fb997783c2ee4003ae4e7a217df39b4f.json b/.changelog/fb997783c2ee4003ae4e7a217df39b4f.json new file mode 100644 index 00000000000..72118ed82b3 --- /dev/null +++ b/.changelog/fb997783c2ee4003ae4e7a217df39b4f.json @@ -0,0 +1,8 @@ +{ + "id": "fb997783-c2ee-4003-ae4e-7a217df39b4f", + "type": "documentation", + "description": "Add examples for API operations in model.", + "modules": [ + "service/pricing" + ] +} \ No newline at end of file diff --git a/feature/dynamodbstreams/attributevalue/go_module_metadata.go b/feature/dynamodbstreams/attributevalue/go_module_metadata.go index b02b4e09cad..113a2f3cd9a 100644 --- a/feature/dynamodbstreams/attributevalue/go_module_metadata.go +++ b/feature/dynamodbstreams/attributevalue/go_module_metadata.go @@ -3,4 +3,4 @@ package attributevalue // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.23" +const goModuleVersion = "1.15.8" diff --git a/service/clouddirectory/endpoints.go b/service/clouddirectory/endpoints.go index d1e98b56e02..26b04c5b9c3 100644 --- a/service/clouddirectory/endpoints.go +++ b/service/clouddirectory/endpoints.go @@ -385,8 +385,8 @@ func (r *resolver) ResolveEndpoint( } } if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { - if "aws-us-gov" == _PartitionResult.Name { + if _PartitionResult.SupportsFIPS == true { + if _PartitionResult.Name == "aws-us-gov" { uriString := func() string { var out strings.Builder out.WriteString("https://clouddirectory.") diff --git a/service/connect/api_op_AssociateAnalyticsDataSet.go b/service/connect/api_op_AssociateAnalyticsDataSet.go index 85ebcc4ec07..e5712e67e32 100644 --- a/service/connect/api_op_AssociateAnalyticsDataSet.go +++ 
b/service/connect/api_op_AssociateAnalyticsDataSet.go @@ -10,8 +10,6 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This API is in preview release for Amazon Connect and is subject to change. -// // Associates the specified dataset for a Amazon Connect instance with the target // account. You can associate only one dataset in a single call. func (c *Client) AssociateAnalyticsDataSet(ctx context.Context, params *AssociateAnalyticsDataSetInput, optFns ...func(*Options)) (*AssociateAnalyticsDataSetOutput, error) { diff --git a/service/connect/api_op_BatchAssociateAnalyticsDataSet.go b/service/connect/api_op_BatchAssociateAnalyticsDataSet.go index 4381378db53..a6a26fa4d79 100644 --- a/service/connect/api_op_BatchAssociateAnalyticsDataSet.go +++ b/service/connect/api_op_BatchAssociateAnalyticsDataSet.go @@ -11,8 +11,6 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This API is in preview release for Amazon Connect and is subject to change. -// // Associates a list of analytics datasets for a given Amazon Connect instance to // a target account. You can associate multiple datasets in a single call. func (c *Client) BatchAssociateAnalyticsDataSet(ctx context.Context, params *BatchAssociateAnalyticsDataSetInput, optFns ...func(*Options)) (*BatchAssociateAnalyticsDataSetOutput, error) { diff --git a/service/connect/api_op_BatchDisassociateAnalyticsDataSet.go b/service/connect/api_op_BatchDisassociateAnalyticsDataSet.go index 9ba4ecb852c..dd320ed7b78 100644 --- a/service/connect/api_op_BatchDisassociateAnalyticsDataSet.go +++ b/service/connect/api_op_BatchDisassociateAnalyticsDataSet.go @@ -11,8 +11,6 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This API is in preview release for Amazon Connect and is subject to change. -// // Removes a list of analytics datasets associated with a given Amazon Connect // instance. You can disassociate multiple datasets in a single call. 
func (c *Client) BatchDisassociateAnalyticsDataSet(ctx context.Context, params *BatchDisassociateAnalyticsDataSetInput, optFns ...func(*Options)) (*BatchDisassociateAnalyticsDataSetOutput, error) { diff --git a/service/connect/api_op_DescribeContact.go b/service/connect/api_op_DescribeContact.go index 21a4b90e72d..f88431de702 100644 --- a/service/connect/api_op_DescribeContact.go +++ b/service/connect/api_op_DescribeContact.go @@ -15,10 +15,9 @@ import ( // // Describes the specified contact. // -// Contact information remains available in Amazon Connect for 24 months, and then -// it is deleted. -// -// Only data from November 12, 2021, and later is returned by this API. +// Contact information remains available in Amazon Connect for 24 months from the +// InitiationTimestamp, and then it is deleted. Only contact information that is +// available in Amazon Connect is returned by this API func (c *Client) DescribeContact(ctx context.Context, params *DescribeContactInput, optFns ...func(*Options)) (*DescribeContactOutput, error) { if params == nil { params = &DescribeContactInput{} diff --git a/service/connect/api_op_DisassociateAnalyticsDataSet.go b/service/connect/api_op_DisassociateAnalyticsDataSet.go index e55d8b15ad4..6168072dfab 100644 --- a/service/connect/api_op_DisassociateAnalyticsDataSet.go +++ b/service/connect/api_op_DisassociateAnalyticsDataSet.go @@ -10,8 +10,6 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This API is in preview release for Amazon Connect and is subject to change. -// // Removes the dataset ID associated with a given Amazon Connect instance. 
func (c *Client) DisassociateAnalyticsDataSet(ctx context.Context, params *DisassociateAnalyticsDataSetInput, optFns ...func(*Options)) (*DisassociateAnalyticsDataSetOutput, error) { if params == nil { diff --git a/service/connect/api_op_GetMetricDataV2.go b/service/connect/api_op_GetMetricDataV2.go index 6be0d8829ce..8b2a8ddbee2 100644 --- a/service/connect/api_op_GetMetricDataV2.go +++ b/service/connect/api_op_GetMetricDataV2.go @@ -571,7 +571,8 @@ type GetMetricDataV2Input struct { // Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect // // Threshold: For ThresholdValue , enter any whole number from 1 to 604800 - // (inclusive), in seconds. For Comparison , you must enter LT (for "Less than"). + // (inclusive), in seconds. For Comparison , you can use LT (for "Less than") or + // LTE (for "Less than equal"). // // UI name: [Contacts removed from queue in X seconds] // @@ -581,7 +582,8 @@ type GetMetricDataV2Input struct { // contact/segmentAttributes/connect:Subtype, Q in Connect // // Threshold: For ThresholdValue enter any whole number from 1 to 604800 - // (inclusive), in seconds. For Comparison , you must enter LT (for "Less than"). + // (inclusive), in seconds. For Comparison , you can use LT (for "Less than") or + // LTE (for "Less than equal"). // // UI name: [Contacts resolved in X] // @@ -793,7 +795,8 @@ type GetMetricDataV2Input struct { // Valid groupings and filters: Queue, Channel, Routing Profile, Q in Connect // // Threshold: For ThresholdValue , enter any whole number from 1 to 604800 - // (inclusive), in seconds. For Comparison , you must enter LT (for "Less than"). + // (inclusive), in seconds. For Comparison , you can use LT (for "Less than") or + // LTE (for "Less than equal"). 
// // UI name: [Service level X] // @@ -842,7 +845,8 @@ type GetMetricDataV2Input struct { // contact/segmentAttributes/connect:Subtype, Q in Connect // // Threshold: For ThresholdValue , enter any whole number from 1 to 604800 - // (inclusive), in seconds. For Comparison , you must enter LT (for "Less than"). + // (inclusive), in seconds. For Comparison , you can use LT (for "Less than") or + // LTE (for "Less than equal"). // // UI name: [Contacts abandoned in X seconds] // @@ -852,7 +856,8 @@ type GetMetricDataV2Input struct { // contact/segmentAttributes/connect:Subtype, Q in Connect // // Threshold: For ThresholdValue , enter any whole number from 1 to 604800 - // (inclusive), in seconds. For Comparison , you must enter LT (for "Less than"). + // (inclusive), in seconds. For Comparison , you can use LT (for "Less than") or + // LTE (for "Less than equal"). // // UI name: [Contacts answered in X seconds] // diff --git a/service/connect/api_op_ListAnalyticsDataAssociations.go b/service/connect/api_op_ListAnalyticsDataAssociations.go index 5f49e3a50f5..1b95711a4a8 100644 --- a/service/connect/api_op_ListAnalyticsDataAssociations.go +++ b/service/connect/api_op_ListAnalyticsDataAssociations.go @@ -11,8 +11,6 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This API is in preview release for Amazon Connect and is subject to change. -// // Lists the association status of requested dataset ID for a given Amazon Connect // instance. func (c *Client) ListAnalyticsDataAssociations(ctx context.Context, params *ListAnalyticsDataAssociationsInput, optFns ...func(*Options)) (*ListAnalyticsDataAssociationsOutput, error) { diff --git a/service/connect/api_op_StartOutboundChatContact.go b/service/connect/api_op_StartOutboundChatContact.go new file mode 100644 index 00000000000..f4f045beefd --- /dev/null +++ b/service/connect/api_op_StartOutboundChatContact.go @@ -0,0 +1,298 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package connect + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/connect/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Initiates a new outbound SMS contact to a customer. Response of this API +// provides the ContactId of the outbound SMS contact created. +// +// SourceEndpoint only supports Endpoints with CONNECT_PHONENUMBER_ARN as Type and +// DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER as Type. +// ContactFlowId initiates the flow to manage the new SMS contact created. +// +// This API can be used to initiate outbound SMS contacts for an agent or it can +// also deflect an ongoing contact to an outbound SMS contact by using the [StartOutboundChatContact]Flow +// Action. +// +// For more information about using SMS in Amazon Connect, see the following +// topics in the Amazon Connect Administrator Guide: +// +// [Set up SMS messaging] +// +// [Request an SMS-enabled phone number through AWS End User Messaging SMS] +// +// [Set up SMS messaging]: https://docs.aws.amazon.com/connect/latest/adminguide/setup-sms-messaging.html +// [Request an SMS-enabled phone number through AWS End User Messaging SMS]: https://docs.aws.amazon.com/connect/latest/adminguide/sms-number.html +// [StartOutboundChatContact]: https://docs.aws.amazon.com/connect/latest/APIReference/API_StartOutboundChatContact.html +func (c *Client) StartOutboundChatContact(ctx context.Context, params *StartOutboundChatContactInput, optFns ...func(*Options)) (*StartOutboundChatContactOutput, error) { + if params == nil { + params = &StartOutboundChatContactInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartOutboundChatContact", params, optFns, c.addOperationStartOutboundChatContactMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartOutboundChatContactOutput) + out.ResultMetadata = metadata + 
return out, nil +} + +type StartOutboundChatContactInput struct { + + // The identifier of the flow for the call. To see the ContactFlowId in the Amazon + // Connect console user interface, on the navigation menu go to Routing, Contact + // Flows. Choose the flow. On the flow page, under the name of the flow, choose + // Show additional flow information. The ContactFlowId is the last part of the ARN, + // shown here in bold: + // + // - + // arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact-flow/123ec456-a007-89c0-1234-xxxxxxxxxxxx + // + // This member is required. + ContactFlowId *string + + // Information about the endpoint. + // + // This member is required. + DestinationEndpoint *types.Endpoint + + // The identifier of the Amazon Connect instance. You can find the instance ID in + // the Amazon Resource Name (ARN) of the instance. + // + // This member is required. + InstanceId *string + + // A set of system defined key-value pairs stored on individual contact segments + // using an attribute map. The attributes are standard Amazon Connect attributes. + // They can be accessed in flows. + // + // - Attribute keys can include only alphanumeric, - , and _ . + // + // - This field can be used to show channel subtype, such as connect:Guide and + // connect:SMS . + // + // This member is required. + SegmentAttributes map[string]types.SegmentAttributeValue + + // Information about the endpoint. + // + // This member is required. + SourceEndpoint *types.Endpoint + + // A custom key-value pair using an attribute map. The attributes are standard + // Amazon Connect attributes, and can be accessed in flows just like any other + // contact attributes. + Attributes map[string]string + + // The total duration of the newly started chat session. If not specified, the + // chat session duration defaults to 25 hours. The minimum configurable time is 60 + // minutes. The maximum configurable time is 10,080 minutes (7 days). 
+ ChatDurationInMinutes *int32 + + // A unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. If not provided, the AWS SDK populates this field. For more + // information about idempotency, see [Making retries safe with idempotent APIs]. The token is valid for 7 days after + // creation. If a contact is already started, the contact ID is returned. + // + // [Making retries safe with idempotent APIs]: https://aws.amazon.com/builders-library/making-retries-safe-with-idempotent-APIs/ + ClientToken *string + + // A chat message. + InitialSystemMessage *types.ChatMessage + + // The customer's details. + ParticipantDetails *types.ParticipantDetails + + // The unique identifier for an Amazon Connect contact. This identifier is related + // to the contact starting. + RelatedContactId *string + + // The supported chat message content types. Supported types are: + // + // - text/plain + // + // - text/markdown + // + // - application/json, application/vnd.amazonaws.connect.message.interactive + // + // - application/vnd.amazonaws.connect.message.interactive.response + // + // Content types must always contain text/plain . You can then put any other + // supported type in the list. For example, all the following lists are valid + // because they contain text/plain : + // + // - [text/plain, text/markdown, application/json] + // + // - [text/markdown, text/plain] + // + // - [text/plain, application/json, + // application/vnd.amazonaws.connect.message.interactive.response] + SupportedMessagingContentTypes []string + + noSmithyDocumentSerde +} + +type StartOutboundChatContactOutput struct { + + // The identifier of this contact within the Amazon Connect instance. + ContactId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartOutboundChatContactMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpStartOutboundChatContact{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartOutboundChatContact{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StartOutboundChatContact"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return 
err + } + if err = addIdempotencyToken_opStartOutboundChatContactMiddleware(stack, options); err != nil { + return err + } + if err = addOpStartOutboundChatContactValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartOutboundChatContact(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +type idempotencyToken_initializeOpStartOutboundChatContact struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpStartOutboundChatContact) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpStartOutboundChatContact) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*StartOutboundChatContactInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *StartOutboundChatContactInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } 
+ return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opStartOutboundChatContactMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpStartOutboundChatContact{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + +func newServiceMetadataMiddleware_opStartOutboundChatContact(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartOutboundChatContact", + } +} diff --git a/service/connect/deserializers.go b/service/connect/deserializers.go index 13d48e01cb7..b9fa5b3a9ff 100644 --- a/service/connect/deserializers.go +++ b/service/connect/deserializers.go @@ -30736,6 +30736,180 @@ func awsRestjson1_deserializeOpDocumentStartContactStreamingOutput(v **StartCont return nil } +type awsRestjson1_deserializeOpStartOutboundChatContact struct { +} + +func (*awsRestjson1_deserializeOpStartOutboundChatContact) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStartOutboundChatContact) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStartOutboundChatContact(response, &metadata) + } + output := 
&StartOutboundChatContactOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentStartOutboundChatContactOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStartOutboundChatContact(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + 
} + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServiceException", errorCode): + return awsRestjson1_deserializeErrorInternalServiceException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsRestjson1_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStartOutboundChatContactOutput(v **StartOutboundChatContactOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartOutboundChatContactOutput + if *v == nil { + sv = &StartOutboundChatContactOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ContactId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected ContactId to be of type string, got %T instead", value) + } + sv.ContactId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpStartOutboundVoiceContact struct { } diff --git a/service/connect/generated.json b/service/connect/generated.json index 011155b6ae6..91ca0cc07f6 100644 --- a/service/connect/generated.json +++ b/service/connect/generated.json @@ -203,6 +203,7 @@ "api_op_StartContactEvaluation.go", "api_op_StartContactRecording.go", "api_op_StartContactStreaming.go", + "api_op_StartOutboundChatContact.go", "api_op_StartOutboundVoiceContact.go", "api_op_StartTaskContact.go", "api_op_StartWebRTCContact.go", diff --git a/service/connect/serializers.go b/service/connect/serializers.go index fa0b02e8bf8..69ec5294a84 100644 --- a/service/connect/serializers.go +++ b/service/connect/serializers.go @@ -18220,6 +18220,156 @@ func awsRestjson1_serializeOpDocumentStartContactStreamingInput(v *StartContactS return nil } +type awsRestjson1_serializeOpStartOutboundChatContact struct { +} + +func (*awsRestjson1_serializeOpStartOutboundChatContact) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStartOutboundChatContact) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartOutboundChatContactInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + 
+ opPath, opQuery := httpbinding.SplitURI("/contact/outbound-chat") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentStartOutboundChatContactInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStartOutboundChatContactInput(v *StartOutboundChatContactInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentStartOutboundChatContactInput(v *StartOutboundChatContactInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Attributes != nil { + ok := object.Key("Attributes") + if err := awsRestjson1_serializeDocumentAttributes(v.Attributes, ok); err != nil { + return err + } + 
} + + if v.ChatDurationInMinutes != nil { + ok := object.Key("ChatDurationInMinutes") + ok.Integer(*v.ChatDurationInMinutes) + } + + if v.ClientToken != nil { + ok := object.Key("ClientToken") + ok.String(*v.ClientToken) + } + + if v.ContactFlowId != nil { + ok := object.Key("ContactFlowId") + ok.String(*v.ContactFlowId) + } + + if v.DestinationEndpoint != nil { + ok := object.Key("DestinationEndpoint") + if err := awsRestjson1_serializeDocumentEndpoint(v.DestinationEndpoint, ok); err != nil { + return err + } + } + + if v.InitialSystemMessage != nil { + ok := object.Key("InitialSystemMessage") + if err := awsRestjson1_serializeDocumentChatMessage(v.InitialSystemMessage, ok); err != nil { + return err + } + } + + if v.InstanceId != nil { + ok := object.Key("InstanceId") + ok.String(*v.InstanceId) + } + + if v.ParticipantDetails != nil { + ok := object.Key("ParticipantDetails") + if err := awsRestjson1_serializeDocumentParticipantDetails(v.ParticipantDetails, ok); err != nil { + return err + } + } + + if v.RelatedContactId != nil { + ok := object.Key("RelatedContactId") + ok.String(*v.RelatedContactId) + } + + if v.SegmentAttributes != nil { + ok := object.Key("SegmentAttributes") + if err := awsRestjson1_serializeDocumentSegmentAttributes(v.SegmentAttributes, ok); err != nil { + return err + } + } + + if v.SourceEndpoint != nil { + ok := object.Key("SourceEndpoint") + if err := awsRestjson1_serializeDocumentEndpoint(v.SourceEndpoint, ok); err != nil { + return err + } + } + + if v.SupportedMessagingContentTypes != nil { + ok := object.Key("SupportedMessagingContentTypes") + if err := awsRestjson1_serializeDocumentSupportedMessagingContentTypes(v.SupportedMessagingContentTypes, ok); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpStartOutboundVoiceContact struct { } diff --git a/service/connect/snapshot/api_op_StartOutboundChatContact.go.snap b/service/connect/snapshot/api_op_StartOutboundChatContact.go.snap new file mode 100644 
index 00000000000..123f3a4777b --- /dev/null +++ b/service/connect/snapshot/api_op_StartOutboundChatContact.go.snap @@ -0,0 +1,42 @@ +StartOutboundChatContact + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + OperationIdempotencyTokenAutoFill + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/connect/snapshot_test.go b/service/connect/snapshot_test.go index 4c8ecc33c7e..2d46a1e2fbd 100644 --- a/service/connect/snapshot_test.go +++ b/service/connect/snapshot_test.go @@ -2402,6 +2402,18 @@ func TestCheckSnapshot_StartContactStreaming(t *testing.T) { } } +func TestCheckSnapshot_StartOutboundChatContact(t *testing.T) { + svc := New(Options{}) + _, err := svc.StartOutboundChatContact(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "StartOutboundChatContact") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_StartOutboundVoiceContact(t *testing.T) { svc := New(Options{}) _, err := svc.StartOutboundVoiceContact(context.Background(), nil, func(o *Options) { @@ -5461,6 +5473,18 @@ func TestUpdateSnapshot_StartContactStreaming(t *testing.T) { } } +func 
TestUpdateSnapshot_StartOutboundChatContact(t *testing.T) { + svc := New(Options{}) + _, err := svc.StartOutboundChatContact(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "StartOutboundChatContact") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_StartOutboundVoiceContact(t *testing.T) { svc := New(Options{}) _, err := svc.StartOutboundVoiceContact(context.Background(), nil, func(o *Options) { diff --git a/service/connect/types/enums.go b/service/connect/types/enums.go index fe6b5112a38..1d828a931cc 100644 --- a/service/connect/types/enums.go +++ b/service/connect/types/enums.go @@ -515,9 +515,10 @@ type EndpointType string // Enum values for EndpointType const ( - EndpointTypeTelephoneNumber EndpointType = "TELEPHONE_NUMBER" - EndpointTypeVoip EndpointType = "VOIP" - EndpointTypeContactFlow EndpointType = "CONTACT_FLOW" + EndpointTypeTelephoneNumber EndpointType = "TELEPHONE_NUMBER" + EndpointTypeVoip EndpointType = "VOIP" + EndpointTypeContactFlow EndpointType = "CONTACT_FLOW" + EndpointTypeConnectPhonenumberArn EndpointType = "CONNECT_PHONENUMBER_ARN" ) // Values returns all known values for EndpointType. Note that this can be @@ -529,6 +530,7 @@ func (EndpointType) Values() []EndpointType { "TELEPHONE_NUMBER", "VOIP", "CONTACT_FLOW", + "CONNECT_PHONENUMBER_ARN", } } diff --git a/service/connect/types/types.go b/service/connect/types/types.go index 56633857731..a03d78cf7cc 100644 --- a/service/connect/types/types.go +++ b/service/connect/types/types.go @@ -3409,7 +3409,8 @@ type MetricFilterV2 struct { // The values to use for filtering data. 
// // Valid metric filter values for INITIATION_METHOD : INBOUND | OUTBOUND | TRANSFER - // | QUEUE_TRANSFER | CALLBACK | API + // | QUEUE_TRANSFER | CALLBACK | API | WEBRTC_API | MONITOR | DISCONNECT | + // EXTERNAL_OUTBOUND // // Valid metric filter values for DISCONNECT_REASON : CUSTOMER_DISCONNECT | // AGENT_DISCONNECT | THIRD_PARTY_DISCONNECT | TELECOM_PROBLEM | BARGED | @@ -3562,7 +3563,7 @@ type NumberReference struct { // // - Duration labels, such as NON_TALK_TIME , CONTACT_DURATION , // AGENT_INTERACTION_DURATION , CUSTOMER_HOLD_TIME have a minimum value of 0 and -// maximum value of 28800. +// maximum value of 63072000. // // - Percentages have a minimum value of 0 and maximum value of 100. // @@ -5873,8 +5874,8 @@ type Threshold struct { // Contains information about the threshold for service level metrics. type ThresholdV2 struct { - // The type of comparison. Only "less than" (LT) and "greater than" (GT) - // comparisons are supported. + // The type of comparison. Currently, "less than" (LT), "less than equal" (LTE), + // and "greater than" (GT) comparisons are supported. Comparison *string // The threshold value to compare. 
diff --git a/service/connect/validators.go b/service/connect/validators.go index 911b3230266..a28fe9c7117 100644 --- a/service/connect/validators.go +++ b/service/connect/validators.go @@ -3850,6 +3850,26 @@ func (m *validateOpStartContactStreaming) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpStartOutboundChatContact struct { +} + +func (*validateOpStartOutboundChatContact) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartOutboundChatContact) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartOutboundChatContactInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartOutboundChatContactInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpStartOutboundVoiceContact struct { } @@ -5818,6 +5838,10 @@ func addOpStartContactStreamingValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpStartContactStreaming{}, middleware.After) } +func addOpStartOutboundChatContactValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartOutboundChatContact{}, middleware.After) +} + func addOpStartOutboundVoiceContactValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpStartOutboundVoiceContact{}, middleware.After) } @@ -11387,6 +11411,43 @@ func validateOpStartContactStreamingInput(v *StartContactStreamingInput) error { } } +func validateOpStartOutboundChatContactInput(v *StartOutboundChatContactInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartOutboundChatContactInput"} + if v.SourceEndpoint == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("SourceEndpoint")) + } + if v.DestinationEndpoint == nil { + invalidParams.Add(smithy.NewErrParamRequired("DestinationEndpoint")) + } + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.SegmentAttributes == nil { + invalidParams.Add(smithy.NewErrParamRequired("SegmentAttributes")) + } + if v.ContactFlowId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ContactFlowId")) + } + if v.ParticipantDetails != nil { + if err := validateParticipantDetails(v.ParticipantDetails); err != nil { + invalidParams.AddNested("ParticipantDetails", err.(smithy.InvalidParamsError)) + } + } + if v.InitialSystemMessage != nil { + if err := validateChatMessage(v.InitialSystemMessage); err != nil { + invalidParams.AddNested("InitialSystemMessage", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpStartOutboundVoiceContactInput(v *StartOutboundVoiceContactInput) error { if v == nil { return nil diff --git a/service/pricing/doc.go b/service/pricing/doc.go index fe9afd3369e..ebd63245655 100644 --- a/service/pricing/doc.go +++ b/service/pricing/doc.go @@ -20,12 +20,12 @@ // Amazon Web Services // // Use GetServices without a service code to retrieve the service codes for all -// Amazon Web Services, then GetServices with a service code to retrieve the -// attribute names for that service. After you have the service code and attribute -// names, you can use GetAttributeValues to see what values are available for an -// attribute. With the service code and an attribute name and value, you can use -// GetProducts to find specific products that you're interested in, such as an -// AmazonEC2 instance, with a Provisioned IOPS volumeType . +// Amazon Web Services services, then GetServices with a service code to retrieve +// the attribute names for that service. 
After you have the service code and +// attribute names, you can use GetAttributeValues to see what values are +// available for an attribute. With the service code and an attribute name and +// value, you can use GetProducts to find specific products that you're interested +// in, such as an AmazonEC2 instance, with a Provisioned IOPS volumeType . // // For more information, see [Using the Amazon Web Services Price List API] in the Billing User Guide. // diff --git a/service/resourcegroups/api_op_CancelTagSyncTask.go b/service/resourcegroups/api_op_CancelTagSyncTask.go new file mode 100644 index 00000000000..5a5b92679aa --- /dev/null +++ b/service/resourcegroups/api_op_CancelTagSyncTask.go @@ -0,0 +1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package resourcegroups + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Cancels the specified tag-sync task. +// +// # Minimum permissions +// +// To run this command, you must have the following permissions: +// +// - resource-groups:CancelTagSyncTask on the application group +// +// - resource-groups:DeleteGroup +func (c *Client) CancelTagSyncTask(ctx context.Context, params *CancelTagSyncTaskInput, optFns ...func(*Options)) (*CancelTagSyncTaskOutput, error) { + if params == nil { + params = &CancelTagSyncTaskInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CancelTagSyncTask", params, optFns, c.addOperationCancelTagSyncTaskMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CancelTagSyncTaskOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CancelTagSyncTaskInput struct { + + // The Amazon resource name (ARN) of the tag-sync task. + // + // This member is required. + TaskArn *string + + noSmithyDocumentSerde +} + +type CancelTagSyncTaskOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCancelTagSyncTaskMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpCancelTagSyncTask{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCancelTagSyncTask{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CancelTagSyncTask"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addOpCancelTagSyncTaskValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelTagSyncTask(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCancelTagSyncTask(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CancelTagSyncTask", + } +} diff --git a/service/resourcegroups/api_op_CreateGroup.go b/service/resourcegroups/api_op_CreateGroup.go index 265c4461676..2e41acafca6 100644 --- a/service/resourcegroups/api_op_CreateGroup.go +++ b/service/resourcegroups/api_op_CreateGroup.go @@ -63,10 +63,21 @@ type CreateGroupInput struct { // [Service configurations for Resource Groups]: https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html Configuration []types.GroupConfigurationItem + // The critical rank of the application group on a scale of 1 to 10, with a rank + // of 1 being the most critical, and a rank of 10 being least critical. + Criticality *int32 + // The description of the resource group. Descriptions can consist of letters, // numbers, hyphens, underscores, periods, and spaces. 
Description *string + // The name of the application group, which you can change at any time. + DisplayName *string + + // A name, email address or other identifier for the person or group who is + // considered as the owner of this application group within your organization. + Owner *string + // The resource query that determines which Amazon Web Services resources are // members of this group. For more information about resource queries, see [Create a tag-based group in Resource Groups]. // diff --git a/service/resourcegroups/api_op_DeleteGroup.go b/service/resourcegroups/api_op_DeleteGroup.go index 48b0ecc8b8b..ea77fdaf4a6 100644 --- a/service/resourcegroups/api_op_DeleteGroup.go +++ b/service/resourcegroups/api_op_DeleteGroup.go @@ -37,7 +37,7 @@ func (c *Client) DeleteGroup(ctx context.Context, params *DeleteGroupInput, optF type DeleteGroupInput struct { - // The name or the ARN of the resource group to delete. + // The name or the Amazon resource name (ARN) of the resource group to delete. Group *string // Deprecated - don't use this parameter. Use Group instead. diff --git a/service/resourcegroups/api_op_GetGroup.go b/service/resourcegroups/api_op_GetGroup.go index 3011037b03f..82c798db34c 100644 --- a/service/resourcegroups/api_op_GetGroup.go +++ b/service/resourcegroups/api_op_GetGroup.go @@ -35,7 +35,7 @@ func (c *Client) GetGroup(ctx context.Context, params *GetGroupInput, optFns ... type GetGroupInput struct { - // The name or the ARN of the resource group to retrieve. + // The name or the Amazon resource name (ARN) of the resource group to retrieve. Group *string // Deprecated - don't use this parameter. Use Group instead. 
diff --git a/service/resourcegroups/api_op_GetGroupConfiguration.go b/service/resourcegroups/api_op_GetGroupConfiguration.go index a2207f57c13..ebd061535d7 100644 --- a/service/resourcegroups/api_op_GetGroupConfiguration.go +++ b/service/resourcegroups/api_op_GetGroupConfiguration.go @@ -38,8 +38,8 @@ func (c *Client) GetGroupConfiguration(ctx context.Context, params *GetGroupConf type GetGroupConfigurationInput struct { - // The name or the ARN of the resource group for which you want to retrive the - // service configuration. + // The name or the Amazon resource name (ARN) of the resource group for which you + // want to retrive the service configuration. Group *string noSmithyDocumentSerde diff --git a/service/resourcegroups/api_op_GetGroupQuery.go b/service/resourcegroups/api_op_GetGroupQuery.go index 1b2f71a5057..5efae398796 100644 --- a/service/resourcegroups/api_op_GetGroupQuery.go +++ b/service/resourcegroups/api_op_GetGroupQuery.go @@ -38,7 +38,7 @@ func (c *Client) GetGroupQuery(ctx context.Context, params *GetGroupQueryInput, type GetGroupQueryInput struct { - // The name or the ARN of the resource group to query. + // The name or the Amazon resource name (ARN) of the resource group to query. Group *string // Don't use this parameter. Use Group instead. diff --git a/service/resourcegroups/api_op_GetTagSyncTask.go b/service/resourcegroups/api_op_GetTagSyncTask.go new file mode 100644 index 00000000000..b9908ccf26a --- /dev/null +++ b/service/resourcegroups/api_op_GetTagSyncTask.go @@ -0,0 +1,203 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package resourcegroups + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/resourcegroups/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns information about a specified tag-sync task. 
+// +// # Minimum permissions +// +// To run this command, you must have the following permissions: +// +// - resource-groups:GetTagSyncTask on the application group +func (c *Client) GetTagSyncTask(ctx context.Context, params *GetTagSyncTaskInput, optFns ...func(*Options)) (*GetTagSyncTaskOutput, error) { + if params == nil { + params = &GetTagSyncTaskInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetTagSyncTask", params, optFns, c.addOperationGetTagSyncTaskMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetTagSyncTaskOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetTagSyncTaskInput struct { + + // The Amazon resource name (ARN) of the tag-sync task. + // + // This member is required. + TaskArn *string + + noSmithyDocumentSerde +} + +type GetTagSyncTaskOutput struct { + + // The timestamp of when the tag-sync task was created. + CreatedAt *time.Time + + // The specific error message in cases where the tag-sync task status is ERROR . + ErrorMessage *string + + // The Amazon resource name (ARN) of the application group. + GroupArn *string + + // The name of the application group. + GroupName *string + + // The Amazon resource name (ARN) of the role assumed by Resource Groups to tag + // and untag resources on your behalf. + // + // For more information about this role, review [Tag-sync required permissions]. + // + // [Tag-sync required permissions]: https://docs.aws.amazon.com/servicecatalog/latest/arguide/app-tag-sync.html#tag-sync-role + RoleArn *string + + // The status of the tag-sync task. + // + // Valid values include: + // + // - ACTIVE - The tag-sync task is actively managing resources in the application + // by adding or removing the awsApplication tag from resources when they are + // tagged or untagged with the specified tag key-value pair. + // + // - ERROR - The tag-sync task is not actively managing resources in the + // application. 
Review the ErrorMessage for more information about resolving the + // error. + Status types.TagSyncTaskStatus + + // The tag key. + TagKey *string + + // The tag value. + TagValue *string + + // The Amazon resource name (ARN) of the tag-sync task. + TaskArn *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetTagSyncTaskMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetTagSyncTask{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetTagSyncTask{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetTagSyncTask"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpGetTagSyncTaskValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetTagSyncTask(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetTagSyncTask(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetTagSyncTask", + } +} diff --git a/service/resourcegroups/api_op_GetTags.go b/service/resourcegroups/api_op_GetTags.go index ba7f21755d3..ccb99a719a6 100644 --- a/service/resourcegroups/api_op_GetTags.go +++ b/service/resourcegroups/api_op_GetTags.go @@ -11,7 +11,7 @@ import ( ) // Returns a list of tags that are associated with a resource group, specified by -// an ARN. +// an Amazon resource name (ARN). 
// // # Minimum permissions // @@ -35,7 +35,8 @@ func (c *Client) GetTags(ctx context.Context, params *GetTagsInput, optFns ...fu type GetTagsInput struct { - // The ARN of the resource group whose tags you want to retrieve. + // The Amazon resource name (ARN) of the resource group whose tags you want to + // retrieve. // // This member is required. Arn *string @@ -45,7 +46,7 @@ type GetTagsInput struct { type GetTagsOutput struct { - // The ARN of the tagged resource group. + // TheAmazon resource name (ARN) of the tagged resource group. Arn *string // The tags associated with the specified resource group. diff --git a/service/resourcegroups/api_op_GroupResources.go b/service/resourcegroups/api_op_GroupResources.go index 9a701c9650f..4171873c3b8 100644 --- a/service/resourcegroups/api_op_GroupResources.go +++ b/service/resourcegroups/api_op_GroupResources.go @@ -13,15 +13,16 @@ import ( // Adds the specified resources to the specified group. // -// You can use this operation with only resource groups that are configured with -// the following types: +// You can only use this operation with the following groups: // // - AWS::EC2::HostManagement // // - AWS::EC2::CapacityReservationPool // -// Other resource group type and resource types aren't currently supported by this -// operation. +// - AWS::ResourceGroups::ApplicationGroup +// +// Other resource group types and resource types are not currently supported by +// this operation. // // # Minimum permissions // @@ -45,12 +46,14 @@ func (c *Client) GroupResources(ctx context.Context, params *GroupResourcesInput type GroupResourcesInput struct { - // The name or the ARN of the resource group to add resources to. + // The name or the Amazon resource name (ARN) of the resource group to add + // resources to. // // This member is required. Group *string - // The list of ARNs of the resources to be added to the group. + // The list of Amazon resource names (ARNs) of the resources to be added to the + // group. 
// // This member is required. ResourceArns []string @@ -60,18 +63,19 @@ type GroupResourcesInput struct { type GroupResourcesOutput struct { - // A list of ARNs of any resources that this operation failed to add to the group. + // A list of Amazon resource names (ARNs) of any resources that this operation + // failed to add to the group. Failed []types.FailedResource - // A list of ARNs of any resources that this operation is still in the process - // adding to the group. These pending additions continue asynchronously. You can - // check the status of pending additions by using the ListGroupResourcesoperation, and checking the - // Resources array in the response and the Status field of each object in that - // array. + // A list of Amazon resource names (ARNs) of any resources that this operation is + // still in the process adding to the group. These pending additions continue + // asynchronously. You can check the status of pending additions by using the ListGroupResources + // operation, and checking the Resources array in the response and the Status + // field of each object in that array. Pending []types.PendingResource - // A list of ARNs of the resources that this operation successfully added to the - // group. + // A list of Amazon resource names (ARNs) of the resources that this operation + // successfully added to the group. Succeeded []string // Metadata pertaining to the operation's result. diff --git a/service/resourcegroups/api_op_ListGroupResources.go b/service/resourcegroups/api_op_ListGroupResources.go index ef1e8694153..cef85c1653f 100644 --- a/service/resourcegroups/api_op_ListGroupResources.go +++ b/service/resourcegroups/api_op_ListGroupResources.go @@ -11,8 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list of ARNs of the resources that are members of a specified -// resource group. +// Returns a list of Amazon resource names (ARNs) of the resources that are +// members of a specified resource group. 
// // # Minimum permissions // @@ -67,7 +67,7 @@ type ListGroupResourcesInput struct { // query type (tag-based or Amazon CloudFront stack-based queries). Filters []types.ResourceFilter - // The name or the ARN of the resource group + // The name or the Amazon resource name (ARN) of the resource group. Group *string // Deprecated - don't use this parameter. Use the Group request field instead. diff --git a/service/resourcegroups/api_op_ListGroupingStatuses.go b/service/resourcegroups/api_op_ListGroupingStatuses.go new file mode 100644 index 00000000000..df44bb24c88 --- /dev/null +++ b/service/resourcegroups/api_op_ListGroupingStatuses.go @@ -0,0 +1,277 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package resourcegroups + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/resourcegroups/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the status of the last grouping or ungrouping action for each resource +// in the specified application group. +func (c *Client) ListGroupingStatuses(ctx context.Context, params *ListGroupingStatusesInput, optFns ...func(*Options)) (*ListGroupingStatusesOutput, error) { + if params == nil { + params = &ListGroupingStatusesInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListGroupingStatuses", params, optFns, c.addOperationListGroupingStatusesMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListGroupingStatusesOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListGroupingStatusesInput struct { + + // The application group identifier, expressed as an Amazon resource name (ARN) or + // the application group name. + // + // This member is required. + Group *string + + // The filter name and value pair that is used to return more specific results + // from a list of resources. 
+ Filters []types.ListGroupingStatusesFilter + + // The maximum number of resources and their statuses returned in the response. + MaxResults *int32 + + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more output + // is available. Set this parameter to the value provided by a previous call's + // NextToken response to indicate where the output should continue from. + NextToken *string + + noSmithyDocumentSerde +} + +type ListGroupingStatusesOutput struct { + + // The application group identifier, expressed as an Amazon resource name (ARN) or + // the application group name. + Group *string + + // Returns details about the grouping or ungrouping status of the resources in the + // specified application group. + GroupingStatuses []types.GroupingStatusesItem + + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You should + // repeat this until the NextToken response element comes back as null . + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListGroupingStatusesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListGroupingStatuses{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListGroupingStatuses{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListGroupingStatuses"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err 
= addOpListGroupingStatusesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGroupingStatuses(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListGroupingStatusesPaginatorOptions is the paginator options for +// ListGroupingStatuses +type ListGroupingStatusesPaginatorOptions struct { + // The maximum number of resources and their statuses returned in the response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListGroupingStatusesPaginator is a paginator for ListGroupingStatuses +type ListGroupingStatusesPaginator struct { + options ListGroupingStatusesPaginatorOptions + client ListGroupingStatusesAPIClient + params *ListGroupingStatusesInput + nextToken *string + firstPage bool +} + +// NewListGroupingStatusesPaginator returns a new ListGroupingStatusesPaginator +func NewListGroupingStatusesPaginator(client ListGroupingStatusesAPIClient, params *ListGroupingStatusesInput, optFns ...func(*ListGroupingStatusesPaginatorOptions)) *ListGroupingStatusesPaginator { + if params == nil { + params = &ListGroupingStatusesInput{} + } + + options := ListGroupingStatusesPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListGroupingStatusesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListGroupingStatusesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListGroupingStatuses page. +func (p *ListGroupingStatusesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListGroupingStatusesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListGroupingStatuses(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListGroupingStatusesAPIClient is a client that implements the +// ListGroupingStatuses operation. +type ListGroupingStatusesAPIClient interface { + ListGroupingStatuses(context.Context, *ListGroupingStatusesInput, ...func(*Options)) (*ListGroupingStatusesOutput, error) +} + +var _ ListGroupingStatusesAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListGroupingStatuses(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListGroupingStatuses", + } +} diff --git a/service/resourcegroups/api_op_ListGroups.go b/service/resourcegroups/api_op_ListGroups.go index 4f04b2ed2e6..7a1bbca2db1 100644 --- a/service/resourcegroups/api_op_ListGroups.go +++ b/service/resourcegroups/api_op_ListGroups.go @@ -47,6 +47,8 @@ type ListGroupsInput struct { // have the specified configuration types attached. The current supported values // are: // + // - AWS::ResourceGroups::ApplicationGroup + // // - AWS::AppRegistry::Application // // - AWS::AppRegistry::ApplicationResourceGroups diff --git a/service/resourcegroups/api_op_ListTagSyncTasks.go b/service/resourcegroups/api_op_ListTagSyncTasks.go new file mode 100644 index 00000000000..581e685396d --- /dev/null +++ b/service/resourcegroups/api_op_ListTagSyncTasks.go @@ -0,0 +1,268 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package resourcegroups + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/resourcegroups/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of tag-sync tasks. +// +// # Minimum permissions +// +// To run this command, you must have the following permissions: +// +// - resource-groups:ListTagSyncTasks with the group passed in the filters as the +// resource or * if using no filters +func (c *Client) ListTagSyncTasks(ctx context.Context, params *ListTagSyncTasksInput, optFns ...func(*Options)) (*ListTagSyncTasksOutput, error) { + if params == nil { + params = &ListTagSyncTasksInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTagSyncTasks", params, optFns, c.addOperationListTagSyncTasksMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTagSyncTasksOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListTagSyncTasksInput struct { + + // The Amazon resource name (ARN) or name of the application group for which you + // want to return a list of tag-sync tasks. + Filters []types.ListTagSyncTasksFilter + + // The maximum number of results to be included in the response. + MaxResults *int32 + + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more output + // is available. Set this parameter to the value provided by a previous call's + // NextToken response to indicate where the output should continue from. + NextToken *string + + noSmithyDocumentSerde +} + +type ListTagSyncTasksOutput struct { + + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. 
You should + // repeat this until the NextToken response element comes back as null . + NextToken *string + + // A list of tag-sync tasks and information about each task. + TagSyncTasks []types.TagSyncTaskItem + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagSyncTasksMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListTagSyncTasks{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListTagSyncTasks{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagSyncTasks"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if 
err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagSyncTasks(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListTagSyncTasksPaginatorOptions is the paginator options for ListTagSyncTasks +type ListTagSyncTasksPaginatorOptions struct { + // The maximum number of results to be included in the response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListTagSyncTasksPaginator is a paginator for ListTagSyncTasks +type ListTagSyncTasksPaginator struct { + options ListTagSyncTasksPaginatorOptions + client ListTagSyncTasksAPIClient + params *ListTagSyncTasksInput + nextToken *string + firstPage bool +} + +// NewListTagSyncTasksPaginator returns a new ListTagSyncTasksPaginator +func NewListTagSyncTasksPaginator(client ListTagSyncTasksAPIClient, params *ListTagSyncTasksInput, optFns ...func(*ListTagSyncTasksPaginatorOptions)) *ListTagSyncTasksPaginator { + if params == nil { + params = &ListTagSyncTasksInput{} + } + + options := ListTagSyncTasksPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListTagSyncTasksPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListTagSyncTasksPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListTagSyncTasks page. +func (p *ListTagSyncTasksPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTagSyncTasksOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListTagSyncTasks(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListTagSyncTasksAPIClient is a client that implements the ListTagSyncTasks +// operation. +type ListTagSyncTasksAPIClient interface { + ListTagSyncTasks(context.Context, *ListTagSyncTasksInput, ...func(*Options)) (*ListTagSyncTasksOutput, error) +} + +var _ ListTagSyncTasksAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListTagSyncTasks(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTagSyncTasks", + } +} diff --git a/service/resourcegroups/api_op_PutGroupConfiguration.go b/service/resourcegroups/api_op_PutGroupConfiguration.go index d52e2bdc810..3ad90f9fe85 100644 --- a/service/resourcegroups/api_op_PutGroupConfiguration.go +++ b/service/resourcegroups/api_op_PutGroupConfiguration.go @@ -50,8 +50,8 @@ type PutGroupConfigurationInput struct { // [Service configurations for Resource Groups]: https://docs.aws.amazon.com/ARG/latest/APIReference/about-slg.html Configuration []types.GroupConfigurationItem - // The name or ARN of the resource group with the configuration that you want to - // update. + // The name or Amazon resource name (ARN) of the resource group with the + // configuration that you want to update. Group *string noSmithyDocumentSerde diff --git a/service/resourcegroups/api_op_StartTagSyncTask.go b/service/resourcegroups/api_op_StartTagSyncTask.go new file mode 100644 index 00000000000..d3e7ea974f6 --- /dev/null +++ b/service/resourcegroups/api_op_StartTagSyncTask.go @@ -0,0 +1,205 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package resourcegroups + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new tag-sync task to onboard and sync resources tagged with a +// specific tag key-value pair to an application. +// +// # Minimum permissions +// +// To run this command, you must have the following permissions: +// +// - resource-groups:StartTagSyncTask on the application group +// +// - resource-groups:CreateGroup +// +// - iam:PassRole on the role provided in the request +func (c *Client) StartTagSyncTask(ctx context.Context, params *StartTagSyncTaskInput, optFns ...func(*Options)) (*StartTagSyncTaskOutput, error) { + if params == nil { + params = &StartTagSyncTaskInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartTagSyncTask", params, optFns, c.addOperationStartTagSyncTaskMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartTagSyncTaskOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartTagSyncTaskInput struct { + + // The Amazon resource name (ARN) or name of the application group for which you + // want to create a tag-sync task. + // + // This member is required. + Group *string + + // The Amazon resource name (ARN) of the role assumed by the service to tag and + // untag resources on your behalf. + // + // This member is required. + RoleArn *string + + // The tag key. Resources tagged with this tag key-value pair will be added to the + // application. If a resource with this tag is later untagged, the tag-sync task + // removes the resource from the application. + // + // This member is required. + TagKey *string + + // The tag value. Resources tagged with this tag key-value pair will be added to + // the application. If a resource with this tag is later untagged, the tag-sync + // task removes the resource from the application. + // + // This member is required. 
+ TagValue *string + + noSmithyDocumentSerde +} + +type StartTagSyncTaskOutput struct { + + // The Amazon resource name (ARN) of the application group for which you want to + // add or remove resources. + GroupArn *string + + // The name of the application group to onboard and sync resources. + GroupName *string + + // The Amazon resource name (ARN) of the role assumed by the service to tag and + // untag resources on your behalf. + RoleArn *string + + // The tag key of the tag-sync task. + TagKey *string + + // The tag value of the tag-sync task. + TagValue *string + + // The Amazon resource name (ARN) of the new tag-sync task. + TaskArn *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartTagSyncTaskMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpStartTagSyncTask{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartTagSyncTask{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StartTagSyncTask"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); 
err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpStartTagSyncTaskValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartTagSyncTask(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartTagSyncTask(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StartTagSyncTask", + } +} diff --git a/service/resourcegroups/api_op_Tag.go b/service/resourcegroups/api_op_Tag.go index ec84b80c176..7c4032170e6 100644 
--- a/service/resourcegroups/api_op_Tag.go +++ b/service/resourcegroups/api_op_Tag.go @@ -10,9 +10,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Adds tags to a resource group with the specified ARN. Existing tags on a -// resource group are not changed if they are not specified in the request -// parameters. +// Adds tags to a resource group with the specified Amazon resource name (ARN). +// Existing tags on a resource group are not changed if they are not specified in +// the request parameters. // // Do not store personally identifiable information (PII) or other confidential or // sensitive information in tags. We use tags to provide you with billing and @@ -41,7 +41,7 @@ func (c *Client) Tag(ctx context.Context, params *TagInput, optFns ...func(*Opti type TagInput struct { - // The ARN of the resource group to which to add tags. + // The Amazon resource name (ARN) of the resource group to which to add tags. // // This member is required. Arn *string @@ -57,7 +57,7 @@ type TagInput struct { type TagOutput struct { - // The ARN of the tagged resource. + // The Amazon resource name (ARN) of the tagged resource. Arn *string // The tags that have been added to the specified resource group. diff --git a/service/resourcegroups/api_op_UngroupResources.go b/service/resourcegroups/api_op_UngroupResources.go index da924c1c51f..6552a2a5be4 100644 --- a/service/resourcegroups/api_op_UngroupResources.go +++ b/service/resourcegroups/api_op_UngroupResources.go @@ -38,12 +38,13 @@ func (c *Client) UngroupResources(ctx context.Context, params *UngroupResourcesI type UngroupResourcesInput struct { - // The name or the ARN of the resource group from which to remove the resources. + // The name or the Amazon resource name (ARN) of the resource group from which to + // remove the resources. // // This member is required. Group *string - // The ARNs of the resources to be removed from the group. 
+ // The Amazon resource names (ARNs) of the resources to be removed from the group. // // This member is required. ResourceArns []string diff --git a/service/resourcegroups/api_op_Untag.go b/service/resourcegroups/api_op_Untag.go index 290ef2945e4..70ec66388ef 100644 --- a/service/resourcegroups/api_op_Untag.go +++ b/service/resourcegroups/api_op_Untag.go @@ -34,8 +34,9 @@ func (c *Client) Untag(ctx context.Context, params *UntagInput, optFns ...func(* type UntagInput struct { - // The ARN of the resource group from which to remove tags. The command removed - // both the specified keys and any values associated with those keys. + // The Amazon resource name (ARN) of the resource group from which to remove tags. + // The command removed both the specified keys and any values associated with those + // keys. // // This member is required. Arn *string @@ -50,7 +51,8 @@ type UntagInput struct { type UntagOutput struct { - // The ARN of the resource group from which tags have been removed. + // The Amazon resource name (ARN) of the resource group from which tags have been + // removed. Arn *string // The keys of the tags that were removed. diff --git a/service/resourcegroups/api_op_UpdateAccountSettings.go b/service/resourcegroups/api_op_UpdateAccountSettings.go index f0f50e5a13d..38c01f07cc2 100644 --- a/service/resourcegroups/api_op_UpdateAccountSettings.go +++ b/service/resourcegroups/api_op_UpdateAccountSettings.go @@ -35,6 +35,9 @@ type UpdateAccountSettingsInput struct { // Specifies whether you want to turn [group lifecycle events] on or off. // + // You can't turn on group lifecycle events if your resource groups quota is + // greater than 2,000. 
+ // // [group lifecycle events]: https://docs.aws.amazon.com/ARG/latest/userguide/monitor-groups.html GroupLifecycleEventsDesiredStatus types.GroupLifecycleEventsDesiredStatus diff --git a/service/resourcegroups/api_op_UpdateGroup.go b/service/resourcegroups/api_op_UpdateGroup.go index 94b903d2b82..30df8595b91 100644 --- a/service/resourcegroups/api_op_UpdateGroup.go +++ b/service/resourcegroups/api_op_UpdateGroup.go @@ -36,12 +36,19 @@ func (c *Client) UpdateGroup(ctx context.Context, params *UpdateGroupInput, optF type UpdateGroupInput struct { + // The critical rank of the application group on a scale of 1 to 10, with a rank + // of 1 being the most critical, and a rank of 10 being least critical. + Criticality *int32 + // The new description that you want to update the resource group with. // Descriptions can contain letters, numbers, hyphens, underscores, periods, and // spaces. Description *string - // The name or the ARN of the resource group to modify. + // The name of the application group, which you can change at any time. + DisplayName *string + + // The name or the ARN of the resource group to update. Group *string // Don't use this parameter. Use Group instead. @@ -49,6 +56,10 @@ type UpdateGroupInput struct { // Deprecated: This field is deprecated, use Group instead. GroupName *string + // A name, email address or other identifier for the person or group who is + // considered as the owner of this application group within your organization. + Owner *string + noSmithyDocumentSerde } diff --git a/service/resourcegroups/api_op_UpdateGroupQuery.go b/service/resourcegroups/api_op_UpdateGroupQuery.go index 3109a14c191..83328ff6458 100644 --- a/service/resourcegroups/api_op_UpdateGroupQuery.go +++ b/service/resourcegroups/api_op_UpdateGroupQuery.go @@ -47,7 +47,7 @@ type UpdateGroupQueryInput struct { // This member is required. ResourceQuery *types.ResourceQuery - // The name or the ARN of the resource group to query. 
+ // The name or the Amazon resource name (ARN) of the resource group to query. Group *string // Don't use this parameter. Use Group instead. diff --git a/service/resourcegroups/deserializers.go b/service/resourcegroups/deserializers.go index cb80c953b66..72cd54d4050 100644 --- a/service/resourcegroups/deserializers.go +++ b/service/resourcegroups/deserializers.go @@ -17,6 +17,7 @@ import ( "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" + "io/ioutil" "strings" "time" ) @@ -29,6 +30,115 @@ func deserializeS3Expires(v string) (*time.Time, error) { return &t, nil } +type awsRestjson1_deserializeOpCancelTagSyncTask struct { +} + +func (*awsRestjson1_deserializeOpCancelTagSyncTask) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCancelTagSyncTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCancelTagSyncTask(response, &metadata) + } + output := &CancelTagSyncTaskOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + +func 
awsRestjson1_deserializeOpErrorCancelTagSyncTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + 
case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestjson1_deserializeOpCreateGroup struct { } @@ -1197,7 +1307,7 @@ func awsRestjson1_deserializeOpDocumentGetTagsOutput(v **GetTagsOutput, value in if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected GroupArn to be of type string, got %T instead", value) + return fmt.Errorf("expected GroupArnV2 to be of type string, got %T instead", value) } sv.Arn = ptr.String(jtv) } @@ -1216,14 +1326,14 @@ func awsRestjson1_deserializeOpDocumentGetTagsOutput(v **GetTagsOutput, value in return nil } -type awsRestjson1_deserializeOpGroupResources struct { +type awsRestjson1_deserializeOpGetTagSyncTask struct { } -func (*awsRestjson1_deserializeOpGroupResources) ID() string { +func (*awsRestjson1_deserializeOpGetTagSyncTask) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpGroupResources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGetTagSyncTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1241,9 +1351,9 @@ func (m *awsRestjson1_deserializeOpGroupResources) HandleDeserialize(ctx context } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGroupResources(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGetTagSyncTask(response, &metadata) } - output := &GroupResourcesOutput{} + output := &GetTagSyncTaskOutput{} out.Result = output var buff [1024]byte 
@@ -1264,7 +1374,7 @@ func (m *awsRestjson1_deserializeOpGroupResources) HandleDeserialize(ctx context return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentGroupResourcesOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentGetTagSyncTaskOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1278,7 +1388,7 @@ func (m *awsRestjson1_deserializeOpGroupResources) HandleDeserialize(ctx context return out, metadata, err } -func awsRestjson1_deserializeOpErrorGroupResources(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGetTagSyncTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1337,6 +1447,9 @@ func awsRestjson1_deserializeOpErrorGroupResources(response *smithyhttp.Response case strings.EqualFold("TooManyRequestsException", errorCode): return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1347,7 +1460,7 @@ func awsRestjson1_deserializeOpErrorGroupResources(response *smithyhttp.Response } } -func awsRestjson1_deserializeOpDocumentGroupResourcesOutput(v **GroupResourcesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGetTagSyncTaskOutput(v **GetTagSyncTaskOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -1360,28 +1473,101 @@ func awsRestjson1_deserializeOpDocumentGroupResourcesOutput(v **GroupResourcesOu return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GroupResourcesOutput + var sv 
*GetTagSyncTaskOutput if *v == nil { - sv = &GroupResourcesOutput{} + sv = &GetTagSyncTaskOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "Failed": - if err := awsRestjson1_deserializeDocumentFailedResourceList(&sv.Failed, value); err != nil { - return err + case "CreatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected timestamp to be a JSON Number, got %T instead", value) + + } } - case "Pending": - if err := awsRestjson1_deserializeDocumentPendingResourceList(&sv.Pending, value); err != nil { - return err + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) } - case "Succeeded": - if err := awsRestjson1_deserializeDocumentResourceArnList(&sv.Succeeded, value); err != nil { - return err + case "GroupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupArnV2 to be of type string, got %T instead", value) + } + sv.GroupArn = ptr.String(jtv) + } + + case "GroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupName to be of type string, got %T instead", value) + } + sv.GroupName = ptr.String(jtv) + } + + case "RoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleArn to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagSyncTaskStatus to be of type string, got %T instead", value) + } + sv.Status = types.TagSyncTaskStatus(jtv) + } + + case "TagKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) + } + sv.TagKey = ptr.String(jtv) + } + + case "TagValue": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + sv.TagValue = ptr.String(jtv) + } + + case "TaskArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagSyncTaskArn to be of type string, got %T instead", value) + } + sv.TaskArn = ptr.String(jtv) } default: @@ -1393,14 +1579,14 @@ func awsRestjson1_deserializeOpDocumentGroupResourcesOutput(v **GroupResourcesOu return nil } -type awsRestjson1_deserializeOpListGroupResources struct { +type awsRestjson1_deserializeOpGroupResources struct { } -func (*awsRestjson1_deserializeOpListGroupResources) ID() string { +func (*awsRestjson1_deserializeOpGroupResources) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListGroupResources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpGroupResources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1418,9 +1604,9 @@ func (m *awsRestjson1_deserializeOpListGroupResources) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListGroupResources(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorGroupResources(response, &metadata) } - output := &ListGroupResourcesOutput{} + output := &GroupResourcesOutput{} out.Result = output var buff [1024]byte @@ -1441,7 +1627,7 @@ func (m *awsRestjson1_deserializeOpListGroupResources) HandleDeserialize(ctx con return out, metadata, err } 
- err = awsRestjson1_deserializeOpDocumentListGroupResourcesOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentGroupResourcesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1455,7 +1641,7 @@ func (m *awsRestjson1_deserializeOpListGroupResources) HandleDeserialize(ctx con return out, metadata, err } -func awsRestjson1_deserializeOpErrorListGroupResources(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorGroupResources(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1514,9 +1700,6 @@ func awsRestjson1_deserializeOpErrorListGroupResources(response *smithyhttp.Resp case strings.EqualFold("TooManyRequestsException", errorCode): return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) - case strings.EqualFold("UnauthorizedException", errorCode): - return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) - default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1527,7 +1710,7 @@ func awsRestjson1_deserializeOpErrorListGroupResources(response *smithyhttp.Resp } } -func awsRestjson1_deserializeOpDocumentListGroupResourcesOutput(v **ListGroupResourcesOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentGroupResourcesOutput(v **GroupResourcesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -1540,36 +1723,27 @@ func awsRestjson1_deserializeOpDocumentListGroupResourcesOutput(v **ListGroupRes return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListGroupResourcesOutput + var sv *GroupResourcesOutput if *v == nil { - sv = &ListGroupResourcesOutput{} + sv = &GroupResourcesOutput{} } else { sv = *v 
} for key, value := range shape { switch key { - case "NextToken": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) - } - sv.NextToken = ptr.String(jtv) - } - - case "QueryErrors": - if err := awsRestjson1_deserializeDocumentQueryErrorList(&sv.QueryErrors, value); err != nil { + case "Failed": + if err := awsRestjson1_deserializeDocumentFailedResourceList(&sv.Failed, value); err != nil { return err } - case "ResourceIdentifiers": - if err := awsRestjson1_deserializeDocumentResourceIdentifierList(&sv.ResourceIdentifiers, value); err != nil { + case "Pending": + if err := awsRestjson1_deserializeDocumentPendingResourceList(&sv.Pending, value); err != nil { return err } - case "Resources": - if err := awsRestjson1_deserializeDocumentListGroupResourcesItemList(&sv.Resources, value); err != nil { + case "Succeeded": + if err := awsRestjson1_deserializeDocumentResourceArnList(&sv.Succeeded, value); err != nil { return err } @@ -1582,14 +1756,14 @@ func awsRestjson1_deserializeOpDocumentListGroupResourcesOutput(v **ListGroupRes return nil } -type awsRestjson1_deserializeOpListGroups struct { +type awsRestjson1_deserializeOpListGroupingStatuses struct { } -func (*awsRestjson1_deserializeOpListGroups) ID() string { +func (*awsRestjson1_deserializeOpListGroupingStatuses) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpListGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListGroupingStatuses) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1607,9 +1781,9 @@ func (m *awsRestjson1_deserializeOpListGroups) HandleDeserialize(ctx context.Con } if 
response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorListGroups(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListGroupingStatuses(response, &metadata) } - output := &ListGroupsOutput{} + output := &ListGroupingStatusesOutput{} out.Result = output var buff [1024]byte @@ -1630,7 +1804,7 @@ func (m *awsRestjson1_deserializeOpListGroups) HandleDeserialize(ctx context.Con return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentListGroupsOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentListGroupingStatusesOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -1644,7 +1818,7 @@ func (m *awsRestjson1_deserializeOpListGroups) HandleDeserialize(ctx context.Con return out, metadata, err } -func awsRestjson1_deserializeOpErrorListGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorListGroupingStatuses(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -1710,7 +1884,7 @@ func awsRestjson1_deserializeOpErrorListGroups(response *smithyhttp.Response, me } } -func awsRestjson1_deserializeOpDocumentListGroupsOutput(v **ListGroupsOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentListGroupingStatusesOutput(v **ListGroupingStatusesOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -1723,22 +1897,26 @@ func awsRestjson1_deserializeOpDocumentListGroupsOutput(v **ListGroupsOutput, va return fmt.Errorf("unexpected JSON type %v", value) } - var sv *ListGroupsOutput + var sv *ListGroupingStatusesOutput if *v == nil { - sv = &ListGroupsOutput{} + sv = &ListGroupingStatusesOutput{} 
} else { sv = *v } for key, value := range shape { switch key { - case "GroupIdentifiers": - if err := awsRestjson1_deserializeDocumentGroupIdentifierList(&sv.GroupIdentifiers, value); err != nil { - return err + case "Group": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupStringV2 to be of type string, got %T instead", value) + } + sv.Group = ptr.String(jtv) } - case "Groups": - if err := awsRestjson1_deserializeDocumentGroupList(&sv.Groups, value); err != nil { + case "GroupingStatuses": + if err := awsRestjson1_deserializeDocumentGroupingStatusesList(&sv.GroupingStatuses, value); err != nil { return err } @@ -1760,14 +1938,14 @@ func awsRestjson1_deserializeOpDocumentListGroupsOutput(v **ListGroupsOutput, va return nil } -type awsRestjson1_deserializeOpPutGroupConfiguration struct { +type awsRestjson1_deserializeOpListGroupResources struct { } -func (*awsRestjson1_deserializeOpPutGroupConfiguration) ID() string { +func (*awsRestjson1_deserializeOpListGroupResources) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpPutGroupConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpListGroupResources) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -1785,12 +1963,555 @@ func (m *awsRestjson1_deserializeOpPutGroupConfiguration) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorPutGroupConfiguration(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorListGroupResources(response, &metadata) } - output := &PutGroupConfigurationOutput{} + output := &ListGroupResourcesOutput{} out.Result 
= output - span.End() + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListGroupResourcesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListGroupResources(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if 
len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListGroupResourcesOutput(v **ListGroupResourcesOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListGroupResourcesOutput + if *v == nil { + sv = &ListGroupResourcesOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", 
value) + } + sv.NextToken = ptr.String(jtv) + } + + case "QueryErrors": + if err := awsRestjson1_deserializeDocumentQueryErrorList(&sv.QueryErrors, value); err != nil { + return err + } + + case "ResourceIdentifiers": + if err := awsRestjson1_deserializeDocumentResourceIdentifierList(&sv.ResourceIdentifiers, value); err != nil { + return err + } + + case "Resources": + if err := awsRestjson1_deserializeDocumentListGroupResourcesItemList(&sv.Resources, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListGroups struct { +} + +func (*awsRestjson1_deserializeOpListGroups) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListGroups(response, &metadata) + } + output := &ListGroupsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListGroupsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return 
awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListGroupsOutput(v **ListGroupsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListGroupsOutput + if *v == nil { + sv = &ListGroupsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GroupIdentifiers": + if err := awsRestjson1_deserializeDocumentGroupIdentifierList(&sv.GroupIdentifiers, value); err != nil { + return err + } + + case "Groups": + if err := awsRestjson1_deserializeDocumentGroupList(&sv.Groups, value); err != nil { + return err + } + + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListTagSyncTasks struct { +} + +func (*awsRestjson1_deserializeOpListTagSyncTasks) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListTagSyncTasks) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListTagSyncTasks(response, &metadata) + } + output := &ListTagSyncTasksOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListTagSyncTasksOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListTagSyncTasks(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response 
body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func 
awsRestjson1_deserializeOpDocumentListTagSyncTasksOutput(v **ListTagSyncTasksOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListTagSyncTasksOutput + if *v == nil { + sv = &ListTagSyncTasksOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "NextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected NextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + case "TagSyncTasks": + if err := awsRestjson1_deserializeDocumentTagSyncTaskList(&sv.TagSyncTasks, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpPutGroupConfiguration struct { +} + +func (*awsRestjson1_deserializeOpPutGroupConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpPutGroupConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorPutGroupConfiguration(response, &metadata) + } + output := 
&PutGroupConfigurationOutput{} + out.Result = output + + span.End() return out, metadata, err } @@ -2044,6 +2765,225 @@ func awsRestjson1_deserializeOpDocumentSearchResourcesOutput(v **SearchResources return nil } +type awsRestjson1_deserializeOpStartTagSyncTask struct { +} + +func (*awsRestjson1_deserializeOpStartTagSyncTask) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStartTagSyncTask) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStartTagSyncTask(response, &metadata) + } + output := &StartTagSyncTaskOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentStartTagSyncTaskOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, 
metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStartTagSyncTask(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("BadRequestException", errorCode): + return awsRestjson1_deserializeErrorBadRequestException(response, errorBody) + + case strings.EqualFold("ForbiddenException", errorCode): + return awsRestjson1_deserializeErrorForbiddenException(response, errorBody) + + case strings.EqualFold("InternalServerErrorException", errorCode): + return awsRestjson1_deserializeErrorInternalServerErrorException(response, errorBody) + + case strings.EqualFold("MethodNotAllowedException", errorCode): + return 
awsRestjson1_deserializeErrorMethodNotAllowedException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsRestjson1_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("TooManyRequestsException", errorCode): + return awsRestjson1_deserializeErrorTooManyRequestsException(response, errorBody) + + case strings.EqualFold("UnauthorizedException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStartTagSyncTaskOutput(v **StartTagSyncTaskOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartTagSyncTaskOutput + if *v == nil { + sv = &StartTagSyncTaskOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "GroupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupArnV2 to be of type string, got %T instead", value) + } + sv.GroupArn = ptr.String(jtv) + } + + case "GroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupName to be of type string, got %T instead", value) + } + sv.GroupName = ptr.String(jtv) + } + + case "RoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleArn to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "TagKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) + } + sv.TagKey = ptr.String(jtv) + } + + case "TagValue": + if value != 
nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + sv.TagValue = ptr.String(jtv) + } + + case "TaskArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagSyncTaskArn to be of type string, got %T instead", value) + } + sv.TaskArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpTag struct { } @@ -2201,7 +3141,7 @@ func awsRestjson1_deserializeOpDocumentTagOutput(v **TagOutput, value interface{ if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected GroupArn to be of type string, got %T instead", value) + return fmt.Errorf("expected GroupArnV2 to be of type string, got %T instead", value) } sv.Arn = ptr.String(jtv) } @@ -2554,7 +3494,7 @@ func awsRestjson1_deserializeOpDocumentUntagOutput(v **UntagOutput, value interf if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected GroupArn to be of type string, got %T instead", value) + return fmt.Errorf("expected GroupArnV2 to be of type string, got %T instead", value) } sv.Arn = ptr.String(jtv) } @@ -3381,6 +4321,42 @@ func awsRestjson1_deserializeDocumentAccountSettings(v **types.AccountSettings, return nil } +func awsRestjson1_deserializeDocumentApplicationTag(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ApplicationArn to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = 
parsedVal + + } + *v = mv + return nil +} + func awsRestjson1_deserializeDocumentBadRequestException(v **types.BadRequestException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3575,6 +4551,24 @@ func awsRestjson1_deserializeDocumentGroup(v **types.Group, value interface{}) e for key, value := range shape { switch key { + case "ApplicationTag": + if err := awsRestjson1_deserializeDocumentApplicationTag(&sv.ApplicationTag, value); err != nil { + return err + } + + case "Criticality": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Criticality to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Criticality = ptr.Int32(int32(i64)) + } + case "Description": if value != nil { jtv, ok := value.(string) @@ -3584,11 +4578,20 @@ func awsRestjson1_deserializeDocumentGroup(v **types.Group, value interface{}) e sv.Description = ptr.String(jtv) } + case "DisplayName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DisplayName to be of type string, got %T instead", value) + } + sv.DisplayName = ptr.String(jtv) + } + case "GroupArn": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected GroupArn to be of type string, got %T instead", value) + return fmt.Errorf("expected GroupArnV2 to be of type string, got %T instead", value) } sv.GroupArn = ptr.String(jtv) } @@ -3602,6 +4605,15 @@ func awsRestjson1_deserializeDocumentGroup(v **types.Group, value interface{}) e sv.Name = ptr.String(jtv) } + case "Owner": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Owner to be of type string, got %T instead", value) + } + sv.Owner = ptr.String(jtv) + } + default: _, _ = key, value @@ -3852,6 +4864,37 @@ func awsRestjson1_deserializeDocumentGroupIdentifier(v **types.GroupIdentifier, for key, value := range shape { switch key { + 
case "Criticality": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Criticality to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Criticality = ptr.Int32(int32(i64)) + } + + case "Description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Description to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "DisplayName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DisplayName to be of type string, got %T instead", value) + } + sv.DisplayName = ptr.String(jtv) + } + case "GroupArn": if value != nil { jtv, ok := value.(string) @@ -3870,6 +4913,15 @@ func awsRestjson1_deserializeDocumentGroupIdentifier(v **types.GroupIdentifier, sv.GroupName = ptr.String(jtv) } + case "Owner": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Owner to be of type string, got %T instead", value) + } + sv.Owner = ptr.String(jtv) + } + default: _, _ = key, value @@ -3913,6 +4965,132 @@ func awsRestjson1_deserializeDocumentGroupIdentifierList(v *[]types.GroupIdentif return nil } +func awsRestjson1_deserializeDocumentGroupingStatusesItem(v **types.GroupingStatusesItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.GroupingStatusesItem + if *v == nil { + sv = &types.GroupingStatusesItem{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Action": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupingType to be of type string, got %T instead", value) + } + sv.Action = types.GroupingType(jtv) + } + + case "ErrorCode": + if 
value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorCode to be of type string, got %T instead", value) + } + sv.ErrorCode = ptr.String(jtv) + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + case "ResourceArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourceArn to be of type string, got %T instead", value) + } + sv.ResourceArn = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupingStatus to be of type string, got %T instead", value) + } + sv.Status = types.GroupingStatus(jtv) + } + + case "UpdatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.UpdatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected timestamp to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentGroupingStatusesList(v *[]types.GroupingStatusesItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.GroupingStatusesItem + if *v == nil { + cv = []types.GroupingStatusesItem{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.GroupingStatusesItem + destAddr := &col + if err := awsRestjson1_deserializeDocumentGroupingStatusesItem(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func 
awsRestjson1_deserializeDocumentGroupList(v *[]types.Group, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -4658,6 +5836,159 @@ func awsRestjson1_deserializeDocumentTags(v *map[string]string, value interface{ return nil } +func awsRestjson1_deserializeDocumentTagSyncTaskItem(v **types.TagSyncTaskItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.TagSyncTaskItem + if *v == nil { + sv = &types.TagSyncTaskItem{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "CreatedAt": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedAt = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "ErrorMessage": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.ErrorMessage = ptr.String(jtv) + } + + case "GroupArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupArnV2 to be of type string, got %T instead", value) + } + sv.GroupArn = ptr.String(jtv) + } + + case "GroupName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GroupName to be of type string, got %T instead", value) + } + sv.GroupName = ptr.String(jtv) + } + + case "RoleArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RoleArn to be of type string, got %T instead", value) + } + sv.RoleArn = ptr.String(jtv) + } + + case "Status": + if value != nil { + jtv, ok := value.(string) + if !ok { + 
return fmt.Errorf("expected TagSyncTaskStatus to be of type string, got %T instead", value) + } + sv.Status = types.TagSyncTaskStatus(jtv) + } + + case "TagKey": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagKey to be of type string, got %T instead", value) + } + sv.TagKey = ptr.String(jtv) + } + + case "TagValue": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + sv.TagValue = ptr.String(jtv) + } + + case "TaskArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagSyncTaskArn to be of type string, got %T instead", value) + } + sv.TaskArn = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentTagSyncTaskList(v *[]types.TagSyncTaskItem, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.TagSyncTaskItem + if *v == nil { + cv = []types.TagSyncTaskItem{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.TagSyncTaskItem + destAddr := &col + if err := awsRestjson1_deserializeDocumentTagSyncTaskItem(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyRequestsException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/resourcegroups/doc.go b/service/resourcegroups/doc.go index 25fb389f0c9..b0ea204f3ac 100644 --- a/service/resourcegroups/doc.go +++ b/service/resourcegroups/doc.go @@ -31,8 +31,8 @@ // // - Applying, editing, and removing tags from resource groups // -// - 
Resolving resource group member ARNs so they can be returned as search -// results +// - Resolving resource group member Amazon resource names (ARN)s so they can be +// returned as search results // // - Getting data about resources that are members of a group // diff --git a/service/resourcegroups/generated.json b/service/resourcegroups/generated.json index bf6177598fc..89ad508636d 100644 --- a/service/resourcegroups/generated.json +++ b/service/resourcegroups/generated.json @@ -8,18 +8,23 @@ "files": [ "api_client.go", "api_client_test.go", + "api_op_CancelTagSyncTask.go", "api_op_CreateGroup.go", "api_op_DeleteGroup.go", "api_op_GetAccountSettings.go", "api_op_GetGroup.go", "api_op_GetGroupConfiguration.go", "api_op_GetGroupQuery.go", + "api_op_GetTagSyncTask.go", "api_op_GetTags.go", "api_op_GroupResources.go", "api_op_ListGroupResources.go", + "api_op_ListGroupingStatuses.go", "api_op_ListGroups.go", + "api_op_ListTagSyncTasks.go", "api_op_PutGroupConfiguration.go", "api_op_SearchResources.go", + "api_op_StartTagSyncTask.go", "api_op_Tag.go", "api_op_UngroupResources.go", "api_op_Untag.go", diff --git a/service/resourcegroups/serializers.go b/service/resourcegroups/serializers.go index 15f6f6902f8..2724cd04f23 100644 --- a/service/resourcegroups/serializers.go +++ b/service/resourcegroups/serializers.go @@ -15,6 +15,87 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) +type awsRestjson1_serializeOpCancelTagSyncTask struct { +} + +func (*awsRestjson1_serializeOpCancelTagSyncTask) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCancelTagSyncTask) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := 
in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CancelTagSyncTaskInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/cancel-tag-sync-task") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCancelTagSyncTaskInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCancelTagSyncTaskInput(v *CancelTagSyncTaskInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func 
awsRestjson1_serializeOpDocumentCancelTagSyncTaskInput(v *CancelTagSyncTaskInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TaskArn != nil { + ok := object.Key("TaskArn") + ok.String(*v.TaskArn) + } + + return nil +} + type awsRestjson1_serializeOpCreateGroup struct { } @@ -95,16 +176,31 @@ func awsRestjson1_serializeOpDocumentCreateGroupInput(v *CreateGroupInput, value } } + if v.Criticality != nil { + ok := object.Key("Criticality") + ok.Integer(*v.Criticality) + } + if v.Description != nil { ok := object.Key("Description") ok.String(*v.Description) } + if v.DisplayName != nil { + ok := object.Key("DisplayName") + ok.String(*v.DisplayName) + } + if v.Name != nil { ok := object.Key("Name") ok.String(*v.Name) } + if v.Owner != nil { + ok := object.Key("Owner") + ok.String(*v.Owner) + } + if v.ResourceQuery != nil { ok := object.Key("ResourceQuery") if err := awsRestjson1_serializeDocumentResourceQuery(v.ResourceQuery, ok); err != nil { @@ -590,6 +686,87 @@ func awsRestjson1_serializeOpHttpBindingsGetTagsInput(v *GetTagsInput, encoder * return nil } +type awsRestjson1_serializeOpGetTagSyncTask struct { +} + +func (*awsRestjson1_serializeOpGetTagSyncTask) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetTagSyncTask) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetTagSyncTaskInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/get-tag-sync-task") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentGetTagSyncTaskInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetTagSyncTaskInput(v *GetTagSyncTaskInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentGetTagSyncTaskInput(v *GetTagSyncTaskInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TaskArn != nil { + ok := object.Key("TaskArn") + ok.String(*v.TaskArn) + } + + return nil +} + type awsRestjson1_serializeOpGroupResources 
struct { } @@ -678,6 +855,104 @@ func awsRestjson1_serializeOpDocumentGroupResourcesInput(v *GroupResourcesInput, return nil } +type awsRestjson1_serializeOpListGroupingStatuses struct { +} + +func (*awsRestjson1_serializeOpListGroupingStatuses) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListGroupingStatuses) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListGroupingStatusesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/list-grouping-statuses") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentListGroupingStatusesInput(input, 
jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListGroupingStatusesInput(v *ListGroupingStatusesInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentListGroupingStatusesInput(v *ListGroupingStatusesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filters != nil { + ok := object.Key("Filters") + if err := awsRestjson1_serializeDocumentListGroupingStatusesFilterList(v.Filters, ok); err != nil { + return err + } + } + + if v.Group != nil { + ok := object.Key("Group") + ok.String(*v.Group) + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + type awsRestjson1_serializeOpListGroupResources struct { } @@ -876,6 +1151,99 @@ func awsRestjson1_serializeOpDocumentListGroupsInput(v *ListGroupsInput, value s return nil } +type awsRestjson1_serializeOpListTagSyncTasks struct { +} + +func (*awsRestjson1_serializeOpListTagSyncTasks) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListTagSyncTasks) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + 
endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagSyncTasksInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/list-tag-sync-tasks") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentListTagSyncTasksInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListTagSyncTasksInput(v *ListTagSyncTasksInput, encoder 
*httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentListTagSyncTasksInput(v *ListTagSyncTasksInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Filters != nil { + ok := object.Key("Filters") + if err := awsRestjson1_serializeDocumentListTagSyncTasksFilterList(v.Filters, ok); err != nil { + return err + } + } + + if v.MaxResults != nil { + ok := object.Key("MaxResults") + ok.Integer(*v.MaxResults) + } + + if v.NextToken != nil { + ok := object.Key("NextToken") + ok.String(*v.NextToken) + } + + return nil +} + type awsRestjson1_serializeOpPutGroupConfiguration struct { } @@ -1057,6 +1425,102 @@ func awsRestjson1_serializeOpDocumentSearchResourcesInput(v *SearchResourcesInpu return nil } +type awsRestjson1_serializeOpStartTagSyncTask struct { +} + +func (*awsRestjson1_serializeOpStartTagSyncTask) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStartTagSyncTask) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartTagSyncTaskInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/start-tag-sync-task") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentStartTagSyncTaskInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStartTagSyncTaskInput(v *StartTagSyncTaskInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentStartTagSyncTaskInput(v *StartTagSyncTaskInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Group != nil { + ok := object.Key("Group") + ok.String(*v.Group) + } + + if v.RoleArn != nil { + ok := object.Key("RoleArn") + ok.String(*v.RoleArn) + } + + if v.TagKey != nil { + ok := object.Key("TagKey") + ok.String(*v.TagKey) + } + + if v.TagValue != nil { + ok := object.Key("TagValue") + ok.String(*v.TagValue) + } + + return nil +} + type 
awsRestjson1_serializeOpTag struct { } @@ -1491,11 +1955,21 @@ func awsRestjson1_serializeOpDocumentUpdateGroupInput(v *UpdateGroupInput, value object := value.Object() defer object.Close() + if v.Criticality != nil { + ok := object.Key("Criticality") + ok.Integer(*v.Criticality) + } + if v.Description != nil { ok := object.Key("Description") ok.String(*v.Description) } + if v.DisplayName != nil { + ok := object.Key("DisplayName") + ok.String(*v.DisplayName) + } + if v.Group != nil { ok := object.Key("Group") ok.String(*v.Group) @@ -1506,6 +1980,11 @@ func awsRestjson1_serializeOpDocumentUpdateGroupInput(v *UpdateGroupInput, value ok.String(*v.GroupName) } + if v.Owner != nil { + ok := object.Key("Owner") + ok.String(*v.Owner) + } + return nil } @@ -1720,6 +2199,79 @@ func awsRestjson1_serializeDocumentGroupParameterList(v []types.GroupConfigurati return nil } +func awsRestjson1_serializeDocumentListGroupingStatusesFilter(v *types.ListGroupingStatusesFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.Name) > 0 { + ok := object.Key("Name") + ok.String(string(v.Name)) + } + + if v.Values != nil { + ok := object.Key("Values") + if err := awsRestjson1_serializeDocumentListGroupingStatusesFilterValues(v.Values, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentListGroupingStatusesFilterList(v []types.ListGroupingStatusesFilter, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentListGroupingStatusesFilter(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentListGroupingStatusesFilterValues(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func 
awsRestjson1_serializeDocumentListTagSyncTasksFilter(v *types.ListTagSyncTasksFilter, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GroupArn != nil { + ok := object.Key("GroupArn") + ok.String(*v.GroupArn) + } + + if v.GroupName != nil { + ok := object.Key("GroupName") + ok.String(*v.GroupName) + } + + return nil +} + +func awsRestjson1_serializeDocumentListTagSyncTasksFilterList(v []types.ListTagSyncTasksFilter, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentListTagSyncTasksFilter(&v[i], av); err != nil { + return err + } + } + return nil +} + func awsRestjson1_serializeDocumentResourceArnList(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() diff --git a/service/resourcegroups/snapshot/api_op_CancelTagSyncTask.go.snap b/service/resourcegroups/snapshot/api_op_CancelTagSyncTask.go.snap new file mode 100644 index 00000000000..76db4a52041 --- /dev/null +++ b/service/resourcegroups/snapshot/api_op_CancelTagSyncTask.go.snap @@ -0,0 +1,41 @@ +CancelTagSyncTask + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + 
RequestResponseLogger diff --git a/service/resourcegroups/snapshot/api_op_GetTagSyncTask.go.snap b/service/resourcegroups/snapshot/api_op_GetTagSyncTask.go.snap new file mode 100644 index 00000000000..369739e9846 --- /dev/null +++ b/service/resourcegroups/snapshot/api_op_GetTagSyncTask.go.snap @@ -0,0 +1,41 @@ +GetTagSyncTask + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/resourcegroups/snapshot/api_op_ListGroupingStatuses.go.snap b/service/resourcegroups/snapshot/api_op_ListGroupingStatuses.go.snap new file mode 100644 index 00000000000..3d8e83c9da6 --- /dev/null +++ b/service/resourcegroups/snapshot/api_op_ListGroupingStatuses.go.snap @@ -0,0 +1,41 @@ +ListGroupingStatuses + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + 
disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/resourcegroups/snapshot/api_op_ListTagSyncTasks.go.snap b/service/resourcegroups/snapshot/api_op_ListTagSyncTasks.go.snap new file mode 100644 index 00000000000..1b14fea3fd8 --- /dev/null +++ b/service/resourcegroups/snapshot/api_op_ListTagSyncTasks.go.snap @@ -0,0 +1,40 @@ +ListTagSyncTasks + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/resourcegroups/snapshot/api_op_StartTagSyncTask.go.snap b/service/resourcegroups/snapshot/api_op_StartTagSyncTask.go.snap new file mode 100644 index 00000000000..f9a59e5263a --- /dev/null +++ b/service/resourcegroups/snapshot/api_op_StartTagSyncTask.go.snap @@ -0,0 +1,41 @@ +StartTagSyncTask + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + 
spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/resourcegroups/snapshot_test.go b/service/resourcegroups/snapshot_test.go index 01c78037c3f..989e0c7310a 100644 --- a/service/resourcegroups/snapshot_test.go +++ b/service/resourcegroups/snapshot_test.go @@ -62,6 +62,18 @@ func testSnapshot(stack *middleware.Stack, operation string) error { } return snapshotOK{} } +func TestCheckSnapshot_CancelTagSyncTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.CancelTagSyncTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CancelTagSyncTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_CreateGroup(t *testing.T) { svc := New(Options{}) _, err := svc.CreateGroup(context.Background(), nil, func(o *Options) { @@ -146,6 +158,18 @@ func TestCheckSnapshot_GetTags(t *testing.T) { } } +func TestCheckSnapshot_GetTagSyncTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetTagSyncTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "GetTagSyncTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_GroupResources(t 
*testing.T) { svc := New(Options{}) _, err := svc.GroupResources(context.Background(), nil, func(o *Options) { @@ -158,6 +182,18 @@ func TestCheckSnapshot_GroupResources(t *testing.T) { } } +func TestCheckSnapshot_ListGroupingStatuses(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListGroupingStatuses(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListGroupingStatuses") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_ListGroupResources(t *testing.T) { svc := New(Options{}) _, err := svc.ListGroupResources(context.Background(), nil, func(o *Options) { @@ -182,6 +218,18 @@ func TestCheckSnapshot_ListGroups(t *testing.T) { } } +func TestCheckSnapshot_ListTagSyncTasks(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTagSyncTasks(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListTagSyncTasks") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_PutGroupConfiguration(t *testing.T) { svc := New(Options{}) _, err := svc.PutGroupConfiguration(context.Background(), nil, func(o *Options) { @@ -206,6 +254,18 @@ func TestCheckSnapshot_SearchResources(t *testing.T) { } } +func TestCheckSnapshot_StartTagSyncTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.StartTagSyncTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "StartTagSyncTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_Tag(t *testing.T) { svc := New(Options{}) _, err := svc.Tag(context.Background(), nil, func(o *Options) { @@ -277,6 +337,18 @@ func TestCheckSnapshot_UpdateGroupQuery(t *testing.T) { 
t.Fatal(err) } } +func TestUpdateSnapshot_CancelTagSyncTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.CancelTagSyncTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CancelTagSyncTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_CreateGroup(t *testing.T) { svc := New(Options{}) _, err := svc.CreateGroup(context.Background(), nil, func(o *Options) { @@ -361,6 +433,18 @@ func TestUpdateSnapshot_GetTags(t *testing.T) { } } +func TestUpdateSnapshot_GetTagSyncTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetTagSyncTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "GetTagSyncTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_GroupResources(t *testing.T) { svc := New(Options{}) _, err := svc.GroupResources(context.Background(), nil, func(o *Options) { @@ -373,6 +457,18 @@ func TestUpdateSnapshot_GroupResources(t *testing.T) { } } +func TestUpdateSnapshot_ListGroupingStatuses(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListGroupingStatuses(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListGroupingStatuses") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_ListGroupResources(t *testing.T) { svc := New(Options{}) _, err := svc.ListGroupResources(context.Background(), nil, func(o *Options) { @@ -397,6 +493,18 @@ func TestUpdateSnapshot_ListGroups(t *testing.T) { } } +func TestUpdateSnapshot_ListTagSyncTasks(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTagSyncTasks(context.Background(), nil, func(o *Options) { + 
o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListTagSyncTasks") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_PutGroupConfiguration(t *testing.T) { svc := New(Options{}) _, err := svc.PutGroupConfiguration(context.Background(), nil, func(o *Options) { @@ -421,6 +529,18 @@ func TestUpdateSnapshot_SearchResources(t *testing.T) { } } +func TestUpdateSnapshot_StartTagSyncTask(t *testing.T) { + svc := New(Options{}) + _, err := svc.StartTagSyncTask(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "StartTagSyncTask") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_Tag(t *testing.T) { svc := New(Options{}) _, err := svc.Tag(context.Background(), nil, func(o *Options) { diff --git a/service/resourcegroups/types/enums.go b/service/resourcegroups/types/enums.go index eeaf6600612..337a09a7ac5 100644 --- a/service/resourcegroups/types/enums.go +++ b/service/resourcegroups/types/enums.go @@ -29,6 +29,9 @@ type GroupFilterName string const ( GroupFilterNameResourceType GroupFilterName = "resource-type" GroupFilterNameConfigurationType GroupFilterName = "configuration-type" + GroupFilterNameOwner GroupFilterName = "owner" + GroupFilterNameDisplayName GroupFilterName = "display-name" + GroupFilterNameCriticality GroupFilterName = "criticality" ) // Values returns all known values for GroupFilterName. 
Note that this can be @@ -39,6 +42,51 @@ func (GroupFilterName) Values() []GroupFilterName { return []GroupFilterName{ "resource-type", "configuration-type", + "owner", + "display-name", + "criticality", + } +} + +type GroupingStatus string + +// Enum values for GroupingStatus +const ( + GroupingStatusSuccess GroupingStatus = "SUCCESS" + GroupingStatusFailed GroupingStatus = "FAILED" + GroupingStatusInProgress GroupingStatus = "IN_PROGRESS" + GroupingStatusSkipped GroupingStatus = "SKIPPED" +) + +// Values returns all known values for GroupingStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (GroupingStatus) Values() []GroupingStatus { + return []GroupingStatus{ + "SUCCESS", + "FAILED", + "IN_PROGRESS", + "SKIPPED", + } +} + +type GroupingType string + +// Enum values for GroupingType +const ( + GroupingTypeGroup GroupingType = "GROUP" + GroupingTypeUngroup GroupingType = "UNGROUP" +) + +// Values returns all known values for GroupingType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (GroupingType) Values() []GroupingType { + return []GroupingType{ + "GROUP", + "UNGROUP", } } @@ -85,6 +133,26 @@ func (GroupLifecycleEventsStatus) Values() []GroupLifecycleEventsStatus { } } +type ListGroupingStatusesFilterName string + +// Enum values for ListGroupingStatusesFilterName +const ( + ListGroupingStatusesFilterNameStatus ListGroupingStatusesFilterName = "status" + ListGroupingStatusesFilterNameResourceArn ListGroupingStatusesFilterName = "resource-arn" +) + +// Values returns all known values for ListGroupingStatusesFilterName. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ListGroupingStatusesFilterName) Values() []ListGroupingStatusesFilterName { + return []ListGroupingStatusesFilterName{ + "status", + "resource-arn", + } +} + type QueryErrorCode string // Enum values for QueryErrorCode @@ -160,3 +228,22 @@ func (ResourceStatusValue) Values() []ResourceStatusValue { "PENDING", } } + +type TagSyncTaskStatus string + +// Enum values for TagSyncTaskStatus +const ( + TagSyncTaskStatusActive TagSyncTaskStatus = "ACTIVE" + TagSyncTaskStatusError TagSyncTaskStatus = "ERROR" +) + +// Values returns all known values for TagSyncTaskStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TagSyncTaskStatus) Values() []TagSyncTaskStatus { + return []TagSyncTaskStatus{ + "ACTIVE", + "ERROR", + } +} diff --git a/service/resourcegroups/types/types.go b/service/resourcegroups/types/types.go index 4318c440435..bdd70f81ef8 100644 --- a/service/resourcegroups/types/types.go +++ b/service/resourcegroups/types/types.go @@ -4,6 +4,7 @@ package types import ( smithydocument "github.com/aws/smithy-go/document" + "time" ) // The Resource Groups settings for this Amazon Web Services account. @@ -31,7 +32,8 @@ type FailedResource struct { // The error message text associated with the failure. ErrorMessage *string - // The ARN of the resource that failed to be added or removed. + // The Amazon resource name (ARN) of the resource that failed to be added or + // removed. ResourceArn *string noSmithyDocumentSerde @@ -53,7 +55,7 @@ type FailedResource struct { // included in the group. type Group struct { - // The ARN of the resource group. + // The Amazon resource name (ARN) of the resource group. // // This member is required. GroupArn *string @@ -63,9 +65,24 @@ type Group struct { // This member is required. 
Name *string + // A tag that defines the application group membership. This tag is only supported + // for application groups. + ApplicationTag map[string]string + + // The critical rank of the application group on a scale of 1 to 10, with a rank + // of 1 being the most critical, and a rank of 10 being least critical. + Criticality *int32 + // The description of the resource group. Description *string + // The name of the application group, which you can change at any time. + DisplayName *string + + // A name, email address or other identifier for the person or group who is + // considered as the owner of this application group within your organization. + Owner *string + noSmithyDocumentSerde } @@ -162,12 +179,51 @@ type GroupFilter struct { // The unique identifiers for a resource group. type GroupIdentifier struct { - // The ARN of the resource group. + // The critical rank of the application group on a scale of 1 to 10, with a rank + // of 1 being the most critical, and a rank of 10 being least critical. + Criticality *int32 + + // The description of the application group. + Description *string + + // The name of the application group, which you can change at any time. + DisplayName *string + + // The Amazon resource name (ARN) of the resource group. GroupArn *string // The name of the resource group. GroupName *string + // A name, email address or other identifier for the person or group who is + // considered as the owner of this group within your organization. + Owner *string + + noSmithyDocumentSerde +} + +// The information about a grouping or ungrouping resource action. +type GroupingStatusesItem struct { + + // Describes the resource grouping action with values of GROUP or UNGROUP . + Action GroupingType + + // Specifies the error code that was raised. + ErrorCode *string + + // A message that explains the ErrorCode . + ErrorMessage *string + + // The Amazon resource name (ARN) of a resource. 
+ ResourceArn *string + + // Describes the resource grouping status with values of SUCCESS , FAILED , + // IN_PROGRESS , or SKIPPED . + Status GroupingStatus + + // A timestamp of when the status was last updated. + UpdatedAt *time.Time + noSmithyDocumentSerde } @@ -190,6 +246,24 @@ type GroupQuery struct { noSmithyDocumentSerde } +// A filter name and value pair that is used to obtain more specific results from +// the list of grouping statuses. +type ListGroupingStatusesFilter struct { + + // The name of the filter. Filter names are case-sensitive. + // + // This member is required. + Name ListGroupingStatusesFilterName + + // One or more filter values. Allowed filter values vary by resource filter name, + // and are case-sensitive. + // + // This member is required. + Values []string + + noSmithyDocumentSerde +} + // A structure returned by the ListGroupResources operation that contains identity and group // membership status information for one of the resources in the group. type ListGroupResourcesItem struct { @@ -206,6 +280,19 @@ type ListGroupResourcesItem struct { noSmithyDocumentSerde } +// Returns tag-sync tasks filtered by the Amazon resource name (ARN) or name of a +// specified application group. +type ListTagSyncTasksFilter struct { + + // The Amazon resource name (ARN) of the application group. + GroupArn *string + + // The name of the application group. + GroupName *string + + noSmithyDocumentSerde +} + // A structure that identifies a resource that is currently pending addition to // the group as a member. Adding a resource to a resource group happens // asynchronously as a background task and this one isn't completed yet. @@ -251,7 +338,7 @@ type ResourceFilter struct { // A structure that contains the ARN of a resource and its resource type. type ResourceIdentifier struct { - // The ARN of a resource. + // The Amazon resource name (ARN) of a resource. ResourceArn *string // The resource type of a resource, such as AWS::EC2::Instance . 
@@ -360,7 +447,7 @@ type ResourceQuery struct { // // - CLOUDFORMATION_STACK_1_0: Specifies that you want the group to contain the // members of an CloudFormation stack. The Query contains a StackIdentifier - // element with an ARN for a CloudFormation stack. + // element with an Amazon resource name (ARN) for a CloudFormation stack. // // - TAG_FILTERS_1_0: Specifies that you want the group to include resource that // have tags that match the query. @@ -383,4 +470,48 @@ type ResourceStatus struct { noSmithyDocumentSerde } +// The Amazon resource name (ARN) of the tag-sync task. +type TagSyncTaskItem struct { + + // The timestamp of when the tag-sync task was created. + CreatedAt *time.Time + + // The specific error message in cases where the tag-sync task status is Error . + ErrorMessage *string + + // The Amazon resource name (ARN) of the application group. + GroupArn *string + + // The name of the application group. + GroupName *string + + // The Amazon resource name (ARN) of the role assumed by the service to tag and + // untag resources on your behalf. + RoleArn *string + + // The status of the tag-sync task. + // + // Valid values include: + // + // - ACTIVE - The tag-sync task is actively managing resources in the application + // by adding or removing the awsApplication tag from resources when they are + // tagged or untagged with the specified tag key-value pair. + // + // - ERROR - The tag-sync task is not actively managing resources in the + // application. Review the ErrorMessage for more information about resolving the + // error. + Status TagSyncTaskStatus + + // The tag key. + TagKey *string + + // The tag value. + TagValue *string + + // The Amazon resource name (ARN) of the tag-sync task. 
+ TaskArn *string + + noSmithyDocumentSerde +} + type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/resourcegroups/validators.go b/service/resourcegroups/validators.go index d27c7bb63bf..8e9a632f428 100644 --- a/service/resourcegroups/validators.go +++ b/service/resourcegroups/validators.go @@ -10,6 +10,26 @@ import ( "github.com/aws/smithy-go/middleware" ) +type validateOpCancelTagSyncTask struct { +} + +func (*validateOpCancelTagSyncTask) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCancelTagSyncTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CancelTagSyncTaskInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCancelTagSyncTaskInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateGroup struct { } @@ -50,6 +70,26 @@ func (m *validateOpGetTags) HandleInitialize(ctx context.Context, in middleware. 
return next.HandleInitialize(ctx, in) } +type validateOpGetTagSyncTask struct { +} + +func (*validateOpGetTagSyncTask) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetTagSyncTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetTagSyncTaskInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetTagSyncTaskInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGroupResources struct { } @@ -70,6 +110,26 @@ func (m *validateOpGroupResources) HandleInitialize(ctx context.Context, in midd return next.HandleInitialize(ctx, in) } +type validateOpListGroupingStatuses struct { +} + +func (*validateOpListGroupingStatuses) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListGroupingStatuses) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListGroupingStatusesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListGroupingStatusesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpListGroupResources struct { } @@ -150,6 +210,26 @@ func (m *validateOpSearchResources) HandleInitialize(ctx context.Context, in mid return next.HandleInitialize(ctx, in) } +type validateOpStartTagSyncTask struct { +} + +func (*validateOpStartTagSyncTask) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartTagSyncTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next 
middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartTagSyncTaskInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartTagSyncTaskInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpTag struct { } @@ -230,6 +310,10 @@ func (m *validateOpUpdateGroupQuery) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +func addOpCancelTagSyncTaskValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCancelTagSyncTask{}, middleware.After) +} + func addOpCreateGroupValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateGroup{}, middleware.After) } @@ -238,10 +322,18 @@ func addOpGetTagsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetTags{}, middleware.After) } +func addOpGetTagSyncTaskValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetTagSyncTask{}, middleware.After) +} + func addOpGroupResourcesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGroupResources{}, middleware.After) } +func addOpListGroupingStatusesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListGroupingStatuses{}, middleware.After) +} + func addOpListGroupResourcesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpListGroupResources{}, middleware.After) } @@ -258,6 +350,10 @@ func addOpSearchResourcesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpSearchResources{}, middleware.After) } +func addOpStartTagSyncTaskValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpStartTagSyncTask{}, middleware.After) +} + func addOpTagValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpTag{}, middleware.After) } @@ -378,6 +474,41 @@ func validateGroupParameterList(v []types.GroupConfigurationParameter) error { } } +func validateListGroupingStatusesFilter(v *types.ListGroupingStatusesFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListGroupingStatusesFilter"} + if len(v.Name) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Values == nil { + invalidParams.Add(smithy.NewErrParamRequired("Values")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateListGroupingStatusesFilterList(v []types.ListGroupingStatusesFilter) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListGroupingStatusesFilterList"} + for i := range v { + if err := validateListGroupingStatusesFilter(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateResourceFilter(v *types.ResourceFilter) error { if v == nil { return nil @@ -431,6 +562,21 @@ func validateResourceQuery(v *types.ResourceQuery) error { } } +func validateOpCancelTagSyncTaskInput(v *CancelTagSyncTaskInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CancelTagSyncTaskInput"} + if v.TaskArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateGroupInput(v *CreateGroupInput) error { if v == nil { return nil @@ -471,6 +617,21 @@ func validateOpGetTagsInput(v *GetTagsInput) error { } } +func validateOpGetTagSyncTaskInput(v 
*GetTagSyncTaskInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetTagSyncTaskInput"} + if v.TaskArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TaskArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGroupResourcesInput(v *GroupResourcesInput) error { if v == nil { return nil @@ -489,6 +650,26 @@ func validateOpGroupResourcesInput(v *GroupResourcesInput) error { } } +func validateOpListGroupingStatusesInput(v *ListGroupingStatusesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListGroupingStatusesInput"} + if v.Group == nil { + invalidParams.Add(smithy.NewErrParamRequired("Group")) + } + if v.Filters != nil { + if err := validateListGroupingStatusesFilterList(v.Filters); err != nil { + invalidParams.AddNested("Filters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpListGroupResourcesInput(v *ListGroupResourcesInput) error { if v == nil { return nil @@ -559,6 +740,30 @@ func validateOpSearchResourcesInput(v *SearchResourcesInput) error { } } +func validateOpStartTagSyncTaskInput(v *StartTagSyncTaskInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartTagSyncTaskInput"} + if v.Group == nil { + invalidParams.Add(smithy.NewErrParamRequired("Group")) + } + if v.TagKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKey")) + } + if v.TagValue == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagValue")) + } + if v.RoleArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("RoleArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpTagInput(v *TagInput) error { if v == nil { return nil diff --git a/service/supplychain/api_op_CreateBillOfMaterialsImportJob.go 
b/service/supplychain/api_op_CreateBillOfMaterialsImportJob.go index 2fe6cbdb3fa..65431fb251c 100644 --- a/service/supplychain/api_op_CreateBillOfMaterialsImportJob.go +++ b/service/supplychain/api_op_CreateBillOfMaterialsImportJob.go @@ -46,7 +46,14 @@ type CreateBillOfMaterialsImportJobInput struct { // This member is required. S3uri *string - // An idempotency token. + // An idempotency token ensures the API request is only completed no more than + // once. This way, retrying the request will not trigger the operation multiple + // times. A client token is a unique, case-sensitive string of 33 to 128 ASCII + // characters. To make an idempotent API request, specify a client token in the + // request. You should not reuse the same client token for other requests. If you + // retry a successful request with the same client token, the request will succeed + // with no further actions being taken, and you will receive the same API response + // as the original successful request. ClientToken *string noSmithyDocumentSerde diff --git a/service/supplychain/api_op_CreateDataIntegrationFlow.go b/service/supplychain/api_op_CreateDataIntegrationFlow.go new file mode 100644 index 00000000000..2ea233d9cc0 --- /dev/null +++ b/service/supplychain/api_op_CreateDataIntegrationFlow.go @@ -0,0 +1,190 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Create DataIntegrationFlow to map one or more different sources to one target +// using the SQL transformation query. 
+func (c *Client) CreateDataIntegrationFlow(ctx context.Context, params *CreateDataIntegrationFlowInput, optFns ...func(*Options)) (*CreateDataIntegrationFlowOutput, error) { + if params == nil { + params = &CreateDataIntegrationFlowInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateDataIntegrationFlow", params, optFns, c.addOperationCreateDataIntegrationFlowMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateDataIntegrationFlowOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters for CreateDataIntegrationFlow. +type CreateDataIntegrationFlowInput struct { + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // Name of the DataIntegrationFlow. + // + // This member is required. + Name *string + + // The source configurations for DataIntegrationFlow. + // + // This member is required. + Sources []types.DataIntegrationFlowSource + + // The target configurations for DataIntegrationFlow. + // + // This member is required. + Target *types.DataIntegrationFlowTarget + + // The transformation configurations for DataIntegrationFlow. + // + // This member is required. + Transformation *types.DataIntegrationFlowTransformation + + // The tags of the DataIntegrationFlow to be created + Tags map[string]string + + noSmithyDocumentSerde +} + +// The response parameters for CreateDataIntegrationFlow. +type CreateDataIntegrationFlowOutput struct { + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of the DataIntegrationFlow created. + // + // This member is required. + Name *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateDataIntegrationFlowMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateDataIntegrationFlow{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateDataIntegrationFlow{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateDataIntegrationFlow"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + 
return err + } + if err = addOpCreateDataIntegrationFlowValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDataIntegrationFlow(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateDataIntegrationFlow(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateDataIntegrationFlow", + } +} diff --git a/service/supplychain/api_op_CreateDataLakeDataset.go b/service/supplychain/api_op_CreateDataLakeDataset.go new file mode 100644 index 00000000000..2a6f2316b9f --- /dev/null +++ b/service/supplychain/api_op_CreateDataLakeDataset.go @@ -0,0 +1,191 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Create a data lake dataset. 
+func (c *Client) CreateDataLakeDataset(ctx context.Context, params *CreateDataLakeDatasetInput, optFns ...func(*Options)) (*CreateDataLakeDatasetOutput, error) { + if params == nil { + params = &CreateDataLakeDatasetInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateDataLakeDataset", params, optFns, c.addOperationCreateDataLakeDatasetMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateDataLakeDatasetOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters for CreateDataLakeDataset. +type CreateDataLakeDatasetInput struct { + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of the dataset. For asc name space, the name must be one of the + // supported data entities under [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]. + // + // [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Name *string + + // The name space of the dataset. + // + // - asc - For information on the Amazon Web Services Supply Chain supported + // datasets see [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]. + // + // - default - For datasets with custom user-defined schemas. + // + // [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Namespace *string + + // The description of the dataset. + Description *string + + // The custom schema of the data lake dataset and is only required when the name + // space is default. + Schema *types.DataLakeDatasetSchema + + // The tags of the dataset. 
+ Tags map[string]string + + noSmithyDocumentSerde +} + +// The response parameters of CreateDataLakeDataset. +type CreateDataLakeDatasetOutput struct { + + // The detail of created dataset. + // + // This member is required. + Dataset *types.DataLakeDataset + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateDataLakeDatasetMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateDataLakeDataset{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateDataLakeDataset{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateDataLakeDataset"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = 
smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpCreateDataLakeDatasetValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDataLakeDataset(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateDataLakeDataset(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateDataLakeDataset", + } +} diff --git a/service/supplychain/api_op_DeleteDataIntegrationFlow.go b/service/supplychain/api_op_DeleteDataIntegrationFlow.go new file mode 100644 index 00000000000..a9eac433a37 --- /dev/null +++ b/service/supplychain/api_op_DeleteDataIntegrationFlow.go @@ -0,0 +1,170 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Delete the DataIntegrationFlow. +func (c *Client) DeleteDataIntegrationFlow(ctx context.Context, params *DeleteDataIntegrationFlowInput, optFns ...func(*Options)) (*DeleteDataIntegrationFlowOutput, error) { + if params == nil { + params = &DeleteDataIntegrationFlowInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteDataIntegrationFlow", params, optFns, c.addOperationDeleteDataIntegrationFlowMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteDataIntegrationFlowOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters for DeleteDataIntegrationFlow. +type DeleteDataIntegrationFlowInput struct { + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of the DataIntegrationFlow to be deleted. + // + // This member is required. + Name *string + + noSmithyDocumentSerde +} + +// The response parameters for DeleteDataIntegrationFlow. +type DeleteDataIntegrationFlowOutput struct { + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of the DataIntegrationFlow deleted. + // + // This member is required. + Name *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteDataIntegrationFlowMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteDataIntegrationFlow{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteDataIntegrationFlow{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteDataIntegrationFlow"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + 
return err + } + if err = addOpDeleteDataIntegrationFlowValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteDataIntegrationFlow(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteDataIntegrationFlow(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteDataIntegrationFlow", + } +} diff --git a/service/supplychain/api_op_DeleteDataLakeDataset.go b/service/supplychain/api_op_DeleteDataLakeDataset.go new file mode 100644 index 00000000000..0fe37b1d3d3 --- /dev/null +++ b/service/supplychain/api_op_DeleteDataLakeDataset.go @@ -0,0 +1,189 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Delete a data lake dataset. 
+func (c *Client) DeleteDataLakeDataset(ctx context.Context, params *DeleteDataLakeDatasetInput, optFns ...func(*Options)) (*DeleteDataLakeDatasetOutput, error) { + if params == nil { + params = &DeleteDataLakeDatasetInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteDataLakeDataset", params, optFns, c.addOperationDeleteDataLakeDatasetMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteDataLakeDatasetOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters of DeleteDataLakeDataset. +type DeleteDataLakeDatasetInput struct { + + // The AWS Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of the dataset. If the namespace is asc, the name must be one of the + // supported [data entities]. + // + // [data entities]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Name *string + + // The namespace of the dataset. The available values are: + // + // - asc: for [AWS Supply Chain supported datasets]. + // + // - default: for datasets with custom user-defined schemas. + // + // [AWS Supply Chain supported datasets]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Namespace *string + + noSmithyDocumentSerde +} + +// The response parameters of DeleteDataLakeDataset. +type DeleteDataLakeDatasetOutput struct { + + // The AWS Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of deleted dataset. + // + // This member is required. + Name *string + + // The namespace of deleted dataset. + // + // This member is required. + Namespace *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteDataLakeDatasetMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpDeleteDataLakeDataset{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpDeleteDataLakeDataset{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteDataLakeDataset"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if 
err = addOpDeleteDataLakeDatasetValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteDataLakeDataset(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteDataLakeDataset(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteDataLakeDataset", + } +} diff --git a/service/supplychain/api_op_GetDataIntegrationFlow.go b/service/supplychain/api_op_GetDataIntegrationFlow.go new file mode 100644 index 00000000000..f0c733c9cb6 --- /dev/null +++ b/service/supplychain/api_op_GetDataIntegrationFlow.go @@ -0,0 +1,166 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// View the DataIntegrationFlow details. 
+func (c *Client) GetDataIntegrationFlow(ctx context.Context, params *GetDataIntegrationFlowInput, optFns ...func(*Options)) (*GetDataIntegrationFlowOutput, error) { + if params == nil { + params = &GetDataIntegrationFlowInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetDataIntegrationFlow", params, optFns, c.addOperationGetDataIntegrationFlowMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetDataIntegrationFlowOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters for GetDataIntegrationFlow. +type GetDataIntegrationFlowInput struct { + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of the DataIntegrationFlow created. + // + // This member is required. + Name *string + + noSmithyDocumentSerde +} + +// The response parameters for GetDataIntegrationFlow. +type GetDataIntegrationFlowOutput struct { + + // The details of the DataIntegrationFlow returned. + // + // This member is required. + Flow *types.DataIntegrationFlow + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetDataIntegrationFlowMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetDataIntegrationFlow{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetDataIntegrationFlow{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetDataIntegrationFlow"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } 
+ if err = addOpGetDataIntegrationFlowValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDataIntegrationFlow(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetDataIntegrationFlow(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetDataIntegrationFlow", + } +} diff --git a/service/supplychain/api_op_GetDataLakeDataset.go b/service/supplychain/api_op_GetDataLakeDataset.go new file mode 100644 index 00000000000..cf912563d4e --- /dev/null +++ b/service/supplychain/api_op_GetDataLakeDataset.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Get a data lake dataset. 
+func (c *Client) GetDataLakeDataset(ctx context.Context, params *GetDataLakeDatasetInput, optFns ...func(*Options)) (*GetDataLakeDatasetOutput, error) {
+	if params == nil {
+		params = &GetDataLakeDatasetInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetDataLakeDataset", params, optFns, c.addOperationGetDataLakeDatasetMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetDataLakeDatasetOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// The request parameters for GetDataLakeDataset.
+type GetDataLakeDatasetInput struct {
+
+	// The Amazon Web Services Supply Chain instance identifier.
+	//
+	// This member is required.
+	InstanceId *string
+
+	// The name of the dataset. For asc name space, the name must be one of the
+	// supported data entities under [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html].
+	//
+	// [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html
+	//
+	// This member is required.
+	Name *string
+
+	// The name space of the dataset. The available values are:
+	//
+	//   - asc - For information on the Amazon Web Services Supply Chain supported
+	//   datasets see [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html].
+	//
+	//   - default - For datasets with custom user-defined schemas.
+	//
+	// [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html
+	//
+	// This member is required.
+	Namespace *string
+
+	noSmithyDocumentSerde
+}
+
+// The response parameters for GetDataLakeDataset.
+type GetDataLakeDatasetOutput struct {
+
+	// The fetched dataset details.
+	//
+	// This member is required.
+	Dataset *types.DataLakeDataset
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetDataLakeDatasetMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpGetDataLakeDataset{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpGetDataLakeDataset{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetDataLakeDataset"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addOpGetDataLakeDatasetValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetDataLakeDataset(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetDataLakeDataset(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetDataLakeDataset", + } +} diff --git a/service/supplychain/api_op_ListDataIntegrationFlows.go b/service/supplychain/api_op_ListDataIntegrationFlows.go new file mode 100644 index 00000000000..2c142ed3fab --- /dev/null +++ b/service/supplychain/api_op_ListDataIntegrationFlows.go @@ -0,0 +1,267 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Lists all the DataIntegrationFlows in a paginated way. 
+func (c *Client) ListDataIntegrationFlows(ctx context.Context, params *ListDataIntegrationFlowsInput, optFns ...func(*Options)) (*ListDataIntegrationFlowsOutput, error) {
+	if params == nil {
+		params = &ListDataIntegrationFlowsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListDataIntegrationFlows", params, optFns, c.addOperationListDataIntegrationFlowsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListDataIntegrationFlowsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+// The request parameters for ListDataIntegrationFlows.
+type ListDataIntegrationFlowsInput struct {
+
+	// The Amazon Web Services Supply Chain instance identifier.
+	//
+	// This member is required.
+	InstanceId *string
+
+	// Specify the maximum number of DataIntegrationFlows to fetch in one paginated
+	// request.
+	MaxResults *int32
+
+	// The pagination token to fetch the next page of the DataIntegrationFlows.
+	NextToken *string
+
+	noSmithyDocumentSerde
+}
+
+// The response parameters for ListDataIntegrationFlows.
+type ListDataIntegrationFlowsOutput struct {
+
+	// The list of DataIntegrationFlows fetched in this paginated request.
+	//
+	// This member is required.
+	Flows []types.DataIntegrationFlow
+
+	// The pagination token to fetch the next page of the DataIntegrationFlows.
+	NextToken *string
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDataIntegrationFlowsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListDataIntegrationFlows{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListDataIntegrationFlows{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDataIntegrationFlows"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return 
err + } + if err = addOpListDataIntegrationFlowsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDataIntegrationFlows(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListDataIntegrationFlowsPaginatorOptions is the paginator options for +// ListDataIntegrationFlows +type ListDataIntegrationFlowsPaginatorOptions struct { + // Specify the maximum number of DataIntegrationFlows to fetch in one paginated + // request. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListDataIntegrationFlowsPaginator is a paginator for ListDataIntegrationFlows +type ListDataIntegrationFlowsPaginator struct { + options ListDataIntegrationFlowsPaginatorOptions + client ListDataIntegrationFlowsAPIClient + params *ListDataIntegrationFlowsInput + nextToken *string + firstPage bool +} + +// NewListDataIntegrationFlowsPaginator returns a new +// ListDataIntegrationFlowsPaginator +func NewListDataIntegrationFlowsPaginator(client ListDataIntegrationFlowsAPIClient, params *ListDataIntegrationFlowsInput, optFns ...func(*ListDataIntegrationFlowsPaginatorOptions)) *ListDataIntegrationFlowsPaginator { + if params == nil { + params = &ListDataIntegrationFlowsInput{} + } + + options := ListDataIntegrationFlowsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListDataIntegrationFlowsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListDataIntegrationFlowsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListDataIntegrationFlows page. +func (p *ListDataIntegrationFlowsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDataIntegrationFlowsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListDataIntegrationFlows(ctx, ¶ms, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListDataIntegrationFlowsAPIClient is a client that implements the +// ListDataIntegrationFlows operation. +type ListDataIntegrationFlowsAPIClient interface { + ListDataIntegrationFlows(context.Context, *ListDataIntegrationFlowsInput, ...func(*Options)) (*ListDataIntegrationFlowsOutput, error) +} + +var _ ListDataIntegrationFlowsAPIClient = (*Client)(nil) + +func newServiceMetadataMiddleware_opListDataIntegrationFlows(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListDataIntegrationFlows", + } +} diff --git a/service/supplychain/api_op_ListDataLakeDatasets.go b/service/supplychain/api_op_ListDataLakeDatasets.go new file mode 100644 index 00000000000..d6f70510140 --- /dev/null +++ b/service/supplychain/api_op_ListDataLakeDatasets.go @@ -0,0 +1,275 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// List the data lake datasets for a specific instance and name space. 
+func (c *Client) ListDataLakeDatasets(ctx context.Context, params *ListDataLakeDatasetsInput, optFns ...func(*Options)) (*ListDataLakeDatasetsOutput, error) { + if params == nil { + params = &ListDataLakeDatasetsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListDataLakeDatasets", params, optFns, c.addOperationListDataLakeDatasetsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListDataLakeDatasetsOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters of ListDataLakeDatasets. +type ListDataLakeDatasetsInput struct { + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The namespace of the dataset. The available values are: + // + // - asc: for [AWS Supply Chain supported datasets]. + // + // - default: for datasets with custom user-defined schemas. + // + // [AWS Supply Chain supported datasets]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Namespace *string + + // The max number of datasets to fetch in this paginated request. + MaxResults *int32 + + // The pagination token to fetch next page of datasets. + NextToken *string + + noSmithyDocumentSerde +} + +// The response parameters of ListDataLakeDatasets. +type ListDataLakeDatasetsOutput struct { + + // The list of fetched dataset details. + // + // This member is required. + Datasets []types.DataLakeDataset + + // The pagination token to fetch next page of datasets. + NextToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDataLakeDatasetsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListDataLakeDatasets{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListDataLakeDatasets{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDataLakeDatasets"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err 
= addOpListDataLakeDatasetsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDataLakeDatasets(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +// ListDataLakeDatasetsPaginatorOptions is the paginator options for +// ListDataLakeDatasets +type ListDataLakeDatasetsPaginatorOptions struct { + // The max number of datasets to fetch in this paginated request. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListDataLakeDatasetsPaginator is a paginator for ListDataLakeDatasets +type ListDataLakeDatasetsPaginator struct { + options ListDataLakeDatasetsPaginatorOptions + client ListDataLakeDatasetsAPIClient + params *ListDataLakeDatasetsInput + nextToken *string + firstPage bool +} + +// NewListDataLakeDatasetsPaginator returns a new ListDataLakeDatasetsPaginator +func NewListDataLakeDatasetsPaginator(client ListDataLakeDatasetsAPIClient, params *ListDataLakeDatasetsInput, optFns ...func(*ListDataLakeDatasetsPaginatorOptions)) *ListDataLakeDatasetsPaginator { + if params == nil { + params = &ListDataLakeDatasetsInput{} + } + + options := ListDataLakeDatasetsPaginatorOptions{} + if params.MaxResults != nil { + options.Limit = *params.MaxResults + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListDataLakeDatasetsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.NextToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListDataLakeDatasetsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListDataLakeDatasets page. +func (p *ListDataLakeDatasetsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDataLakeDatasetsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.NextToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxResults = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListDataLakeDatasets(ctx, ¶ms, optFns...) 
+	if err != nil {
+		return nil, err
+	}
+	p.firstPage = false
+
+	prevToken := p.nextToken
+	p.nextToken = result.NextToken
+
+	if p.options.StopOnDuplicateToken &&
+		prevToken != nil &&
+		p.nextToken != nil &&
+		*prevToken == *p.nextToken {
+		p.nextToken = nil
+	}
+
+	return result, nil
+}
+
+// ListDataLakeDatasetsAPIClient is a client that implements the
+// ListDataLakeDatasets operation.
+type ListDataLakeDatasetsAPIClient interface {
+	ListDataLakeDatasets(context.Context, *ListDataLakeDatasetsInput, ...func(*Options)) (*ListDataLakeDatasetsOutput, error)
+}
+
+var _ ListDataLakeDatasetsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListDataLakeDatasets(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		OperationName: "ListDataLakeDatasets",
+	}
+}
diff --git a/service/supplychain/api_op_ListTagsForResource.go b/service/supplychain/api_op_ListTagsForResource.go
new file mode 100644
index 00000000000..dff252f6e81
--- /dev/null
+++ b/service/supplychain/api_op_ListTagsForResource.go
@@ -0,0 +1,160 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package supplychain
+
+import (
+	"context"
+	"fmt"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// List all the tags for an Amazon Web Services Supply Chain resource.
+func (c *Client) ListTagsForResource(ctx context.Context, params *ListTagsForResourceInput, optFns ...func(*Options)) (*ListTagsForResourceOutput, error) { + if params == nil { + params = &ListTagsForResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListTagsForResource", params, optFns, c.addOperationListTagsForResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListTagsForResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters of ListTagsForResource. +type ListTagsForResourceInput struct { + + // The Amazon Web Services Supply chain resource ARN that needs tags to be listed. + // + // This member is required. + ResourceArn *string + + noSmithyDocumentSerde +} + +// The response parameters of ListTagsForResource. +type ListTagsForResourceOutput struct { + + // The tags added to an Amazon Web Services Supply Chain resource. + // + // This member is required. + Tags map[string]string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpListTagsForResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagsForResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addOpListTagsForResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsForResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opListTagsForResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListTagsForResource", + } +} diff --git a/service/supplychain/api_op_TagResource.go b/service/supplychain/api_op_TagResource.go new file mode 100644 index 00000000000..607d4a2cf9a --- /dev/null +++ b/service/supplychain/api_op_TagResource.go @@ -0,0 +1,159 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Create tags for an Amazon Web Services Supply chain resource. 
+func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters of TagResource. +type TagResourceInput struct { + + // The Amazon Web Services Supply chain resource ARN that needs to be tagged. + // + // This member is required. + ResourceArn *string + + // The tags of the Amazon Web Services Supply chain resource to be created. + // + // This member is required. + Tags map[string]string + + noSmithyDocumentSerde +} + +// The response parameters for TagResource. +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = 
addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "TagResource", + } +} diff --git a/service/supplychain/api_op_UntagResource.go b/service/supplychain/api_op_UntagResource.go new file mode 100644 index 00000000000..87d04f35bb1 --- /dev/null +++ b/service/supplychain/api_op_UntagResource.go @@ -0,0 +1,160 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Delete tags for an Amazon Web Services Supply chain resource. +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters of UntagResource. +type UntagResourceInput struct { + + // The Amazon Web Services Supply chain resource ARN that needs to be untagged. + // + // This member is required. + ResourceArn *string + + // The list of tag keys to be deleted for an Amazon Web Services Supply Chain + // resource. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +// The response parameters of UntagResource. +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UntagResource", + } +} diff --git a/service/supplychain/api_op_UpdateDataIntegrationFlow.go b/service/supplychain/api_op_UpdateDataIntegrationFlow.go new file mode 100644 index 00000000000..9e646e88daf --- /dev/null +++ b/service/supplychain/api_op_UpdateDataIntegrationFlow.go @@ -0,0 +1,175 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Update the DataIntegrationFlow. 
+func (c *Client) UpdateDataIntegrationFlow(ctx context.Context, params *UpdateDataIntegrationFlowInput, optFns ...func(*Options)) (*UpdateDataIntegrationFlowOutput, error) { + if params == nil { + params = &UpdateDataIntegrationFlowInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateDataIntegrationFlow", params, optFns, c.addOperationUpdateDataIntegrationFlowMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateDataIntegrationFlowOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters for UpdateDataIntegrationFlow. +type UpdateDataIntegrationFlowInput struct { + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of the DataIntegrationFlow to be updated. + // + // This member is required. + Name *string + + // The new source configurations for the DataIntegrationFlow. + Sources []types.DataIntegrationFlowSource + + // The new target configurations for the DataIntegrationFlow. + Target *types.DataIntegrationFlowTarget + + // The new transformation configurations for the DataIntegrationFlow. + Transformation *types.DataIntegrationFlowTransformation + + noSmithyDocumentSerde +} + +// The response parameters for UpdateDataIntegrationFlow. +type UpdateDataIntegrationFlowOutput struct { + + // The details of the updated DataIntegrationFlow. + // + // This member is required. + Flow *types.DataIntegrationFlow + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateDataIntegrationFlowMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpUpdateDataIntegrationFlow{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUpdateDataIntegrationFlow{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateDataIntegrationFlow"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + 
return err + } + if err = addOpUpdateDataIntegrationFlowValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateDataIntegrationFlow(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateDataIntegrationFlow(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateDataIntegrationFlow", + } +} diff --git a/service/supplychain/api_op_UpdateDataLakeDataset.go b/service/supplychain/api_op_UpdateDataLakeDataset.go new file mode 100644 index 00000000000..ecf3ec53a59 --- /dev/null +++ b/service/supplychain/api_op_UpdateDataLakeDataset.go @@ -0,0 +1,184 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package supplychain + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Update a data lake dataset. 
+func (c *Client) UpdateDataLakeDataset(ctx context.Context, params *UpdateDataLakeDatasetInput, optFns ...func(*Options)) (*UpdateDataLakeDatasetOutput, error) { + if params == nil { + params = &UpdateDataLakeDatasetInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateDataLakeDataset", params, optFns, c.addOperationUpdateDataLakeDatasetMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateDataLakeDatasetOutput) + out.ResultMetadata = metadata + return out, nil +} + +// The request parameters of UpdateDataLakeDataset. +type UpdateDataLakeDatasetInput struct { + + // The Amazon Web Services Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The name of the dataset. For asc name space, the name must be one of the + // supported data entities under [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]. + // + // [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Name *string + + // The name space of the dataset. The available values are: + // + // - asc - For information on the Amazon Web Services Supply Chain supported + // datasets see [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]. + // + // - default - For datasets with custom user-defined schemas. + // + // [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Namespace *string + + // The updated description of the data lake dataset. + Description *string + + noSmithyDocumentSerde +} + +// The response parameters of UpdateDataLakeDataset. +type UpdateDataLakeDatasetOutput struct { + + // The updated dataset details. + // + // This member is required. 
+ Dataset *types.DataLakeDataset + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateDataLakeDatasetMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpUpdateDataLakeDataset{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpUpdateDataLakeDataset{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateDataLakeDataset"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + 
} + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpUpdateDataLakeDatasetValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateDataLakeDataset(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateDataLakeDataset(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "UpdateDataLakeDataset", + } +} diff --git a/service/supplychain/deserializers.go b/service/supplychain/deserializers.go index 6bb566c8aee..b4eb0b9aca8 100644 --- a/service/supplychain/deserializers.go +++ b/service/supplychain/deserializers.go @@ -203,14 +203,14 @@ func awsRestjson1_deserializeOpDocumentCreateBillOfMaterialsImportJobOutput(v ** return nil } -type awsRestjson1_deserializeOpGetBillOfMaterialsImportJob struct { +type awsRestjson1_deserializeOpCreateDataIntegrationFlow struct { } -func (*awsRestjson1_deserializeOpGetBillOfMaterialsImportJob) ID() string { +func (*awsRestjson1_deserializeOpCreateDataIntegrationFlow) ID() string { return "OperationDeserializer" } -func (m 
*awsRestjson1_deserializeOpGetBillOfMaterialsImportJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestjson1_deserializeOpCreateDataIntegrationFlow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -228,9 +228,9 @@ func (m *awsRestjson1_deserializeOpGetBillOfMaterialsImportJob) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorGetBillOfMaterialsImportJob(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorCreateDataIntegrationFlow(response, &metadata) } - output := &GetBillOfMaterialsImportJobOutput{} + output := &CreateDataIntegrationFlowOutput{} out.Result = output var buff [1024]byte @@ -251,7 +251,7 @@ func (m *awsRestjson1_deserializeOpGetBillOfMaterialsImportJob) HandleDeserializ return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentGetBillOfMaterialsImportJobOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentCreateDataIntegrationFlowOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -265,7 +265,7 @@ func (m *awsRestjson1_deserializeOpGetBillOfMaterialsImportJob) HandleDeserializ return out, metadata, err } -func awsRestjson1_deserializeOpErrorGetBillOfMaterialsImportJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorCreateDataIntegrationFlow(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -337,7 +337,7 @@ func 
awsRestjson1_deserializeOpErrorGetBillOfMaterialsImportJob(response *smithy } } -func awsRestjson1_deserializeOpDocumentGetBillOfMaterialsImportJobOutput(v **GetBillOfMaterialsImportJobOutput, value interface{}) error { +func awsRestjson1_deserializeOpDocumentCreateDataIntegrationFlowOutput(v **CreateDataIntegrationFlowOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -350,18 +350,31 @@ func awsRestjson1_deserializeOpDocumentGetBillOfMaterialsImportJobOutput(v **Get return fmt.Errorf("unexpected JSON type %v", value) } - var sv *GetBillOfMaterialsImportJobOutput + var sv *CreateDataIntegrationFlowOutput if *v == nil { - sv = &GetBillOfMaterialsImportJobOutput{} + sv = &CreateDataIntegrationFlowOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "job": - if err := awsRestjson1_deserializeDocumentBillOfMaterialsImportJob(&sv.Job, value); err != nil { - return err + case "instanceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UUID to be of type string, got %T instead", value) + } + sv.InstanceId = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) } default: @@ -373,14 +386,14 @@ func awsRestjson1_deserializeOpDocumentGetBillOfMaterialsImportJobOutput(v **Get return nil } -type awsRestjson1_deserializeOpSendDataIntegrationEvent struct { +type awsRestjson1_deserializeOpCreateDataLakeDataset struct { } -func (*awsRestjson1_deserializeOpSendDataIntegrationEvent) ID() string { +func (*awsRestjson1_deserializeOpCreateDataLakeDataset) ID() string { return "OperationDeserializer" } -func (m *awsRestjson1_deserializeOpSendDataIntegrationEvent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m 
*awsRestjson1_deserializeOpCreateDataLakeDataset) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -398,9 +411,9 @@ func (m *awsRestjson1_deserializeOpSendDataIntegrationEvent) HandleDeserialize(c } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestjson1_deserializeOpErrorSendDataIntegrationEvent(response, &metadata) + return out, metadata, awsRestjson1_deserializeOpErrorCreateDataLakeDataset(response, &metadata) } - output := &SendDataIntegrationEventOutput{} + output := &CreateDataLakeDatasetOutput{} out.Result = output var buff [1024]byte @@ -421,7 +434,7 @@ func (m *awsRestjson1_deserializeOpSendDataIntegrationEvent) HandleDeserialize(c return out, metadata, err } - err = awsRestjson1_deserializeOpDocumentSendDataIntegrationEventOutput(&output, shape) + err = awsRestjson1_deserializeOpDocumentCreateDataLakeDatasetOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -435,7 +448,7 @@ func (m *awsRestjson1_deserializeOpSendDataIntegrationEvent) HandleDeserialize(c return out, metadata, err } -func awsRestjson1_deserializeOpErrorSendDataIntegrationEvent(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestjson1_deserializeOpErrorCreateDataLakeDataset(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -507,7 +520,7 @@ func awsRestjson1_deserializeOpErrorSendDataIntegrationEvent(response *smithyhtt } } -func awsRestjson1_deserializeOpDocumentSendDataIntegrationEventOutput(v **SendDataIntegrationEventOutput, value interface{}) error { +func 
awsRestjson1_deserializeOpDocumentCreateDataLakeDatasetOutput(v **CreateDataLakeDatasetOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -520,22 +533,18 @@ func awsRestjson1_deserializeOpDocumentSendDataIntegrationEventOutput(v **SendDa return fmt.Errorf("unexpected JSON type %v", value) } - var sv *SendDataIntegrationEventOutput + var sv *CreateDataLakeDatasetOutput if *v == nil { - sv = &SendDataIntegrationEventOutput{} + sv = &CreateDataLakeDatasetOutput{} } else { sv = *v } for key, value := range shape { switch key { - case "eventId": - if value != nil { - jtv, ok := value.(string) - if !ok { - return fmt.Errorf("expected UUID to be of type string, got %T instead", value) - } - sv.EventId = ptr.String(jtv) + case "dataset": + if err := awsRestjson1_deserializeDocumentDataLakeDataset(&sv.Dataset, value); err != nil { + return err } default: @@ -547,12 +556,41 @@ func awsRestjson1_deserializeOpDocumentSendDataIntegrationEventOutput(v **SendDa return nil } -func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.AccessDeniedException{} +type awsRestjson1_deserializeOpDeleteDataIntegrationFlow struct { +} + +func (*awsRestjson1_deserializeOpDeleteDataIntegrationFlow) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpDeleteDataIntegrationFlow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, 
metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteDataIntegrationFlow(response, &metadata) + } + output := &DeleteDataIntegrationFlowOutput{} + out.Result = output + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) decoder.UseNumber() var shape interface{} @@ -563,36 +601,46 @@ func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Res Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) - + err = awsRestjson1_deserializeOpDocumentDeleteDataIntegrationFlowOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), Snapshot: snapshot.Bytes(), } - return err } - errorBody.Seek(0, io.SeekStart) - - return output + span.End() + return out, metadata, err } -func awsRestjson1_deserializeErrorConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ConflictException{} +func awsRestjson1_deserializeOpErrorDeleteDataIntegrationFlow(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode 
:= "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) decoder := json.NewDecoder(body) decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) err = &smithy.DeserializationError{ @@ -602,65 +650,130 @@ func awsRestjson1_deserializeErrorConflictException(response *smithyhttp.Respons return err } - err := awsRestjson1_deserializeDocumentConflictException(&output, shape) + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case 
strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, } - return err + return genericError + } +} - errorBody.Seek(0, io.SeekStart) +func awsRestjson1_deserializeOpDocumentDeleteDataIntegrationFlowOutput(v **DeleteDataIntegrationFlowOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } - return output -} + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } -func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InternalServerException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) + var sv *DeleteDataIntegrationFlowOutput + if *v == nil { + sv = &DeleteDataIntegrationFlowOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "instanceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UUID to be of type string, got %T instead", value) + } + sv.InstanceId = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + default: + _, _ = key, value - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: 
fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), } - return err } + *v = sv + return nil +} - err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) +type awsRestjson1_deserializeOpDeleteDataLakeDataset struct { +} + +func (*awsRestjson1_deserializeOpDeleteDataLakeDataset) ID() string { + return "OperationDeserializer" +} +func (m *awsRestjson1_deserializeOpDeleteDataLakeDataset) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err + return out, metadata, err } - errorBody.Seek(0, io.SeekStart) + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } - return output -} + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorDeleteDataLakeDataset(response, &metadata) + } + output := &DeleteDataLakeDatasetOutput{} + out.Result = output -func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ResourceNotFoundException{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) decoder.UseNumber() var shape interface{} 
@@ -671,32 +784,188 @@ func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape) - + err = awsRestjson1_deserializeOpDocumentDeleteDataLakeDatasetOutput(&output, shape) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), Snapshot: snapshot.Bytes(), } - return err } - errorBody.Seek(0, io.SeekStart) - - return output + span.End() + return out, metadata, err } -func awsRestjson1_deserializeErrorServiceQuotaExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ServiceQuotaExceededException{} +func awsRestjson1_deserializeOpErrorDeleteDataLakeDataset(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to 
decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentDeleteDataLakeDatasetOutput(v **DeleteDataLakeDatasetOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteDataLakeDatasetOutput + if *v == nil { + sv = &DeleteDataLakeDatasetOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "instanceId": 
+ if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UUID to be of type string, got %T instead", value) + } + sv.InstanceId = ptr.String(jtv) + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataLakeDatasetName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "namespace": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataLakeDatasetNamespace to be of type string, got %T instead", value) + } + sv.Namespace = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetBillOfMaterialsImportJob struct { +} + +func (*awsRestjson1_deserializeOpGetBillOfMaterialsImportJob) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetBillOfMaterialsImportJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetBillOfMaterialsImportJob(response, &metadata) + } + output := &GetBillOfMaterialsImportJobOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := 
json.NewDecoder(body) decoder.UseNumber() var shape interface{} @@ -707,11 +976,45 @@ func awsRestjson1_deserializeErrorServiceQuotaExceededException(response *smithy Err: fmt.Errorf("failed to decode response body, %w", err), Snapshot: snapshot.Bytes(), } - return err + return out, metadata, err } - err := awsRestjson1_deserializeDocumentServiceQuotaExceededException(&output, shape) + err = awsRestjson1_deserializeOpDocumentGetBillOfMaterialsImportJobOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetBillOfMaterialsImportJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -723,83 +1026,2684 @@ func awsRestjson1_deserializeErrorServiceQuotaExceededException(response *smithy } errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } - return output + 
switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } } -func awsRestjson1_deserializeErrorThrottlingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ThrottlingException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) +func awsRestjson1_deserializeOpDocumentGetBillOfMaterialsImportJobOutput(v **GetBillOfMaterialsImportJobOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetBillOfMaterialsImportJobOutput + if *v == nil { + sv = &GetBillOfMaterialsImportJobOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "job": + if err := 
awsRestjson1_deserializeDocumentBillOfMaterialsImportJob(&sv.Job, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetDataIntegrationFlow struct { +} + +func (*awsRestjson1_deserializeOpGetDataIntegrationFlow) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetDataIntegrationFlow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetDataIntegrationFlow(response, &metadata) + } + output := &GetDataIntegrationFlowOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetDataIntegrationFlowOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, 
metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetDataIntegrationFlow(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return 
awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetDataIntegrationFlowOutput(v **GetDataIntegrationFlowOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetDataIntegrationFlowOutput + if *v == nil { + sv = &GetDataIntegrationFlowOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "flow": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlow(&sv.Flow, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpGetDataLakeDataset struct { +} + +func (*awsRestjson1_deserializeOpGetDataLakeDataset) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpGetDataLakeDataset) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := 
startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorGetDataLakeDataset(response, &metadata) + } + output := &GetDataLakeDatasetOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentGetDataLakeDatasetOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorGetDataLakeDataset(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff 
[1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentGetDataLakeDatasetOutput(v **GetDataLakeDatasetOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of 
type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetDataLakeDatasetOutput + if *v == nil { + sv = &GetDataLakeDatasetOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "dataset": + if err := awsRestjson1_deserializeDocumentDataLakeDataset(&sv.Dataset, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListDataIntegrationFlows struct { +} + +func (*awsRestjson1_deserializeOpListDataIntegrationFlows) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListDataIntegrationFlows) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListDataIntegrationFlows(response, &metadata) + } + output := &ListDataIntegrationFlowsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListDataIntegrationFlowsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorListDataIntegrationFlows(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + 
case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListDataIntegrationFlowsOutput(v **ListDataIntegrationFlowsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListDataIntegrationFlowsOutput + if *v == nil { + sv = &ListDataIntegrationFlowsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "flows": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowList(&sv.Flows, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowNextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type 
awsRestjson1_deserializeOpListDataLakeDatasets struct { +} + +func (*awsRestjson1_deserializeOpListDataLakeDatasets) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListDataLakeDatasets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorListDataLakeDatasets(response, &metadata) + } + output := &ListDataLakeDatasetsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentListDataLakeDatasetsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err 
+} + +func awsRestjson1_deserializeOpErrorListDataLakeDatasets(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ServiceQuotaExceededException", errorCode): + return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, 
errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentListDataLakeDatasetsOutput(v **ListDataLakeDatasetsOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *ListDataLakeDatasetsOutput + if *v == nil { + sv = &ListDataLakeDatasetsOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "datasets": + if err := awsRestjson1_deserializeDocumentDataLakeDatasetList(&sv.Datasets, value); err != nil { + return err + } + + case "nextToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataLakeDatasetNextToken to be of type string, got %T instead", value) + } + sv.NextToken = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpListTagsForResource struct { +} + +func (*awsRestjson1_deserializeOpListTagsForResource) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpListTagsForResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := 
startMetricTimer(ctx, "client.call.deserialization_duration")
	// NOTE(review): the lines above/below are the continuation of
	// awsRestjson1_deserializeOpListTagsForResource.HandleDeserialize, whose
	// opening lines precede this chunk. It decodes a 2xx JSON body into
	// ListTagsForResourceOutput, or dispatches non-2xx responses to the
	// operation's error deserializer.
	defer endTimer()
	defer span.End()
	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestjson1_deserializeOpErrorListTagsForResource(response, &metadata)
	}
	output := &ListTagsForResourceOutput{}
	out.Result = output

	// Tee the body through a ring buffer so a snapshot of the most recent
	// bytes can be attached to any DeserializationError.
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(response.Body, ringBuffer)

	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	err = awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(&output, shape)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		return out, metadata, &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
			Snapshot: snapshot.Bytes(),
		}
	}

	span.End()
	return out, metadata, err
}

// awsRestjson1_deserializeOpErrorListTagsForResource maps a non-2xx response
// to the modeled error type named by the X-Amzn-ErrorType header or, failing
// that, by the error code found in the JSON body.
func awsRestjson1_deserializeOpErrorListTagsForResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	headerCode := response.Header.Get("X-Amzn-ErrorType")
	if len(headerCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(headerCode)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	jsonCode, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	// Rewind so the typed-error deserializer below can re-read the body.
	errorBody.Seek(0, io.SeekStart)
	// The header takes precedence over the code embedded in the JSON body.
	if len(headerCode) == 0 && len(jsonCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(jsonCode)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("AccessDeniedException", errorCode):
		return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)

	case strings.EqualFold("ConflictException", errorCode):
		return awsRestjson1_deserializeErrorConflictException(response, errorBody)

	case strings.EqualFold("InternalServerException", errorCode):
		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)

	case strings.EqualFold("ResourceNotFoundException", errorCode):
		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)

	case strings.EqualFold("ServiceQuotaExceededException", errorCode):
		return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody)

	case strings.EqualFold("ThrottlingException", errorCode):
		return awsRestjson1_deserializeErrorThrottlingException(response, errorBody)

	case strings.EqualFold("ValidationException", errorCode):
		return awsRestjson1_deserializeErrorValidationException(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestjson1_deserializeOpDocumentListTagsForResourceOutput populates the
// output struct from the decoded JSON document shape.
func awsRestjson1_deserializeOpDocumentListTagsForResourceOutput(v **ListTagsForResourceOutput, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}

	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}

	var sv *ListTagsForResourceOutput
	if *v == nil {
		sv = &ListTagsForResourceOutput{}
	} else {
		sv = *v
	}

	for key, value := range shape {
		switch key {
		case "tags":
			if err := awsRestjson1_deserializeDocumentTagMap(&sv.Tags, value); err != nil {
				return err
			}

		default:
			// Unknown members are ignored for forward compatibility.
			_, _ = key, value

		}
	}
	*v = sv
	return nil
}

// awsRestjson1_deserializeOpSendDataIntegrationEvent is the deserialize
// middleware for the SendDataIntegrationEvent operation.
type awsRestjson1_deserializeOpSendDataIntegrationEvent struct {
}

// ID identifies this middleware in the operation's deserialize stack.
func (*awsRestjson1_deserializeOpSendDataIntegrationEvent) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize decodes a successful SendDataIntegrationEvent response
// body into SendDataIntegrationEventOutput, or dispatches non-2xx responses
// to the operation's error deserializer.
func (m *awsRestjson1_deserializeOpSendDataIntegrationEvent) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
	defer endTimer()
	defer span.End()
	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestjson1_deserializeOpErrorSendDataIntegrationEvent(response, &metadata)
	}
	output := &SendDataIntegrationEventOutput{}
	out.Result = output

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(response.Body, ringBuffer)

	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	err = awsRestjson1_deserializeOpDocumentSendDataIntegrationEventOutput(&output, shape)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		return out, metadata, &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
			Snapshot: snapshot.Bytes(),
		}
	}

	span.End()
	return out, metadata, err
}

// awsRestjson1_deserializeOpErrorSendDataIntegrationEvent maps a non-2xx
// response to the modeled error type (header code first, then JSON body code).
func awsRestjson1_deserializeOpErrorSendDataIntegrationEvent(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	headerCode := response.Header.Get("X-Amzn-ErrorType")
	if len(headerCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(headerCode)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	jsonCode, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	if len(headerCode) == 0 && len(jsonCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(jsonCode)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("AccessDeniedException", errorCode):
		return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)

	case strings.EqualFold("ConflictException", errorCode):
		return awsRestjson1_deserializeErrorConflictException(response, errorBody)

	case strings.EqualFold("InternalServerException", errorCode):
		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)

	case strings.EqualFold("ResourceNotFoundException", errorCode):
		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)

	case strings.EqualFold("ServiceQuotaExceededException", errorCode):
		return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody)

	case strings.EqualFold("ThrottlingException", errorCode):
		return awsRestjson1_deserializeErrorThrottlingException(response, errorBody)

	case strings.EqualFold("ValidationException", errorCode):
		return awsRestjson1_deserializeErrorValidationException(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestjson1_deserializeOpDocumentSendDataIntegrationEventOutput populates
// the output struct from the decoded JSON document shape.
func awsRestjson1_deserializeOpDocumentSendDataIntegrationEventOutput(v **SendDataIntegrationEventOutput, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}

	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}

	var sv *SendDataIntegrationEventOutput
	if *v == nil {
		sv = &SendDataIntegrationEventOutput{}
	} else {
		sv = *v
	}

	for key, value := range shape {
		switch key {
		case "eventId":
			if value != nil {
				jtv, ok := value.(string)
				if !ok {
					return fmt.Errorf("expected UUID to be of type string, got %T instead", value)
				}
				sv.EventId = ptr.String(jtv)
			}

		default:
			// Unknown members are ignored for forward compatibility.
			_, _ = key, value

		}
	}
	*v = sv
	return nil
}

// awsRestjson1_deserializeOpTagResource is the deserialize middleware for the
// TagResource operation.
type awsRestjson1_deserializeOpTagResource struct {
}

// ID identifies this middleware in the operation's deserialize stack.
func (*awsRestjson1_deserializeOpTagResource) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize handles the TagResource response. The operation has no
// modeled output members, so a 2xx response produces an empty output struct
// without decoding the body.
func (m *awsRestjson1_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
	defer endTimer()
	defer span.End()
	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestjson1_deserializeOpErrorTagResource(response, &metadata)
	}
	output := &TagResourceOutput{}
	out.Result = output

	span.End()
	return out, metadata, err
}

// awsRestjson1_deserializeOpErrorTagResource maps a non-2xx response to the
// modeled error type (header code first, then JSON body code).
func awsRestjson1_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	headerCode := response.Header.Get("X-Amzn-ErrorType")
	if len(headerCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(headerCode)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	jsonCode, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	if len(headerCode) == 0 && len(jsonCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(jsonCode)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("AccessDeniedException", errorCode):
		return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)

	case strings.EqualFold("ConflictException", errorCode):
		return awsRestjson1_deserializeErrorConflictException(response, errorBody)

	case strings.EqualFold("InternalServerException", errorCode):
		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)

	case strings.EqualFold("ResourceNotFoundException", errorCode):
		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)

	case strings.EqualFold("ServiceQuotaExceededException", errorCode):
		return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody)

	case strings.EqualFold("ThrottlingException", errorCode):
		return awsRestjson1_deserializeErrorThrottlingException(response, errorBody)

	case strings.EqualFold("ValidationException", errorCode):
		return awsRestjson1_deserializeErrorValidationException(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestjson1_deserializeOpUntagResource is the deserialize middleware for
// the UntagResource operation.
type awsRestjson1_deserializeOpUntagResource struct {
}

// ID identifies this middleware in the operation's deserialize stack.
func (*awsRestjson1_deserializeOpUntagResource) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize handles the UntagResource response. The operation has no
// modeled output members, so a 2xx response produces an empty output struct
// without decoding the body.
func (m *awsRestjson1_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
	defer endTimer()
	defer span.End()
	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestjson1_deserializeOpErrorUntagResource(response, &metadata)
	}
	output := &UntagResourceOutput{}
	out.Result = output

	span.End()
	return out, metadata, err
}

// awsRestjson1_deserializeOpErrorUntagResource maps a non-2xx response to the
// modeled error type (header code first, then JSON body code).
func awsRestjson1_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	headerCode := response.Header.Get("X-Amzn-ErrorType")
	if len(headerCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(headerCode)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	jsonCode, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	if len(headerCode) == 0 && len(jsonCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(jsonCode)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("AccessDeniedException", errorCode):
		return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)

	case strings.EqualFold("ConflictException", errorCode):
		return awsRestjson1_deserializeErrorConflictException(response, errorBody)

	case strings.EqualFold("InternalServerException", errorCode):
		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)

	case strings.EqualFold("ResourceNotFoundException", errorCode):
		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)

	case strings.EqualFold("ServiceQuotaExceededException", errorCode):
		return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody)

	case strings.EqualFold("ThrottlingException", errorCode):
		return awsRestjson1_deserializeErrorThrottlingException(response, errorBody)

	case strings.EqualFold("ValidationException", errorCode):
		return awsRestjson1_deserializeErrorValidationException(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestjson1_deserializeOpUpdateDataIntegrationFlow is the deserialize
// middleware for the UpdateDataIntegrationFlow operation.
type awsRestjson1_deserializeOpUpdateDataIntegrationFlow struct {
}

// ID identifies this middleware in the operation's deserialize stack.
func (*awsRestjson1_deserializeOpUpdateDataIntegrationFlow) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize decodes a successful UpdateDataIntegrationFlow response
// body into UpdateDataIntegrationFlowOutput, or dispatches non-2xx responses
// to the operation's error deserializer.
func (m *awsRestjson1_deserializeOpUpdateDataIntegrationFlow) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
	defer endTimer()
	defer span.End()
	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestjson1_deserializeOpErrorUpdateDataIntegrationFlow(response, &metadata)
	}
	output := &UpdateDataIntegrationFlowOutput{}
	out.Result = output

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(response.Body, ringBuffer)

	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	err = awsRestjson1_deserializeOpDocumentUpdateDataIntegrationFlowOutput(&output, shape)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		return out, metadata, &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
			Snapshot: snapshot.Bytes(),
		}
	}

	span.End()
	return out, metadata, err
}

// awsRestjson1_deserializeOpErrorUpdateDataIntegrationFlow maps a non-2xx
// response to the modeled error type (header code first, then JSON body code).
func awsRestjson1_deserializeOpErrorUpdateDataIntegrationFlow(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	headerCode := response.Header.Get("X-Amzn-ErrorType")
	if len(headerCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(headerCode)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	jsonCode, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	if len(headerCode) == 0 && len(jsonCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(jsonCode)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("AccessDeniedException", errorCode):
		return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)

	case strings.EqualFold("ConflictException", errorCode):
		return awsRestjson1_deserializeErrorConflictException(response, errorBody)

	case strings.EqualFold("InternalServerException", errorCode):
		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)

	case strings.EqualFold("ResourceNotFoundException", errorCode):
		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)

	case strings.EqualFold("ServiceQuotaExceededException", errorCode):
		return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody)

	case strings.EqualFold("ThrottlingException", errorCode):
		return awsRestjson1_deserializeErrorThrottlingException(response, errorBody)

	case strings.EqualFold("ValidationException", errorCode):
		return awsRestjson1_deserializeErrorValidationException(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestjson1_deserializeOpDocumentUpdateDataIntegrationFlowOutput populates
// the output struct from the decoded JSON document shape.
func awsRestjson1_deserializeOpDocumentUpdateDataIntegrationFlowOutput(v **UpdateDataIntegrationFlowOutput, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}

	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}

	var sv *UpdateDataIntegrationFlowOutput
	if *v == nil {
		sv = &UpdateDataIntegrationFlowOutput{}
	} else {
		sv = *v
	}

	for key, value := range shape {
		switch key {
		case "flow":
			if err := awsRestjson1_deserializeDocumentDataIntegrationFlow(&sv.Flow, value); err != nil {
				return err
			}

		default:
			// Unknown members are ignored for forward compatibility.
			_, _ = key, value

		}
	}
	*v = sv
	return nil
}

// awsRestjson1_deserializeOpUpdateDataLakeDataset is the deserialize
// middleware for the UpdateDataLakeDataset operation.
type awsRestjson1_deserializeOpUpdateDataLakeDataset struct {
}

// ID identifies this middleware in the operation's deserialize stack.
func (*awsRestjson1_deserializeOpUpdateDataLakeDataset) ID() string {
	return "OperationDeserializer"
}

// HandleDeserialize decodes a successful UpdateDataLakeDataset response body
// into UpdateDataLakeDatasetOutput, or dispatches non-2xx responses to the
// operation's error deserializer.
func (m *awsRestjson1_deserializeOpUpdateDataLakeDataset) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
	out, metadata, err = next.HandleDeserialize(ctx, in)
	if err != nil {
		return out, metadata, err
	}

	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
	defer endTimer()
	defer span.End()
	response, ok := out.RawResponse.(*smithyhttp.Response)
	if !ok {
		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
	}

	if response.StatusCode < 200 || response.StatusCode >= 300 {
		return out, metadata, awsRestjson1_deserializeOpErrorUpdateDataLakeDataset(response, &metadata)
	}
	output := &UpdateDataLakeDatasetOutput{}
	out.Result = output

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(response.Body, ringBuffer)

	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return out, metadata, err
	}

	err = awsRestjson1_deserializeOpDocumentUpdateDataLakeDatasetOutput(&output, shape)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		return out, metadata, &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
			Snapshot: snapshot.Bytes(),
		}
	}

	span.End()
	return out, metadata, err
}

// awsRestjson1_deserializeOpErrorUpdateDataLakeDataset maps a non-2xx
// response to the modeled error type (header code first, then JSON body code).
func awsRestjson1_deserializeOpErrorUpdateDataLakeDataset(response *smithyhttp.Response, metadata *middleware.Metadata) error {
	var errorBuffer bytes.Buffer
	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
	}
	errorBody := bytes.NewReader(errorBuffer.Bytes())

	errorCode := "UnknownError"
	errorMessage := errorCode

	headerCode := response.Header.Get("X-Amzn-ErrorType")
	if len(headerCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(headerCode)
	}

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	jsonCode, message, err := restjson.GetErrorInfo(decoder)
	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)
	if len(headerCode) == 0 && len(jsonCode) != 0 {
		errorCode = restjson.SanitizeErrorCode(jsonCode)
	}
	if len(message) != 0 {
		errorMessage = message
	}

	switch {
	case strings.EqualFold("AccessDeniedException", errorCode):
		return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)

	case strings.EqualFold("ConflictException", errorCode):
		return awsRestjson1_deserializeErrorConflictException(response, errorBody)

	case strings.EqualFold("InternalServerException", errorCode):
		return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)

	case strings.EqualFold("ResourceNotFoundException", errorCode):
		return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody)

	case strings.EqualFold("ServiceQuotaExceededException", errorCode):
		return awsRestjson1_deserializeErrorServiceQuotaExceededException(response, errorBody)

	case strings.EqualFold("ThrottlingException", errorCode):
		return awsRestjson1_deserializeErrorThrottlingException(response, errorBody)

	case strings.EqualFold("ValidationException", errorCode):
		return awsRestjson1_deserializeErrorValidationException(response, errorBody)

	default:
		genericError := &smithy.GenericAPIError{
			Code:    errorCode,
			Message: errorMessage,
		}
		return genericError

	}
}

// awsRestjson1_deserializeOpDocumentUpdateDataLakeDatasetOutput populates the
// output struct from the decoded JSON document shape.
func awsRestjson1_deserializeOpDocumentUpdateDataLakeDatasetOutput(v **UpdateDataLakeDatasetOutput, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}

	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}

	var sv *UpdateDataLakeDatasetOutput
	if *v == nil {
		sv = &UpdateDataLakeDatasetOutput{}
	} else {
		sv = *v
	}

	for key, value := range shape {
		switch key {
		case "dataset":
			if err := awsRestjson1_deserializeDocumentDataLakeDataset(&sv.Dataset, value); err != nil {
				return err
			}

		default:
			// Unknown members are ignored for forward compatibility.
			_, _ = key, value

		}
	}
	*v = sv
	return nil
}

// awsRestjson1_deserializeErrorAccessDeniedException decodes the error body
// into a typed AccessDeniedException and returns it as the operation error.
func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	output := &types.AccessDeniedException{}
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)

	return output
}

// awsRestjson1_deserializeErrorConflictException decodes the error body into
// a typed ConflictException and returns it as the operation error.
func awsRestjson1_deserializeErrorConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	output := &types.ConflictException{}
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	err := awsRestjson1_deserializeDocumentConflictException(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)

	return output
}

// awsRestjson1_deserializeErrorInternalServerException decodes the error body
// into a typed InternalServerException and returns it as the operation error.
func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	output := &types.InternalServerException{}
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)

	return output
}

// awsRestjson1_deserializeErrorResourceNotFoundException decodes the error
// body into a typed ResourceNotFoundException and returns it as the operation
// error.
func awsRestjson1_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	output := &types.ResourceNotFoundException{}
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	err := awsRestjson1_deserializeDocumentResourceNotFoundException(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)

	return output
}

// awsRestjson1_deserializeErrorServiceQuotaExceededException decodes the
// error body into a typed ServiceQuotaExceededException and returns it as the
// operation error.
func awsRestjson1_deserializeErrorServiceQuotaExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	output := &types.ServiceQuotaExceededException{}
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	err := awsRestjson1_deserializeDocumentServiceQuotaExceededException(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)

	return output
}

// awsRestjson1_deserializeErrorThrottlingException decodes the error body
// into a typed ThrottlingException and returns it as the operation error.
func awsRestjson1_deserializeErrorThrottlingException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	output := &types.ThrottlingException{}
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	err := awsRestjson1_deserializeDocumentThrottlingException(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)

	return output
}

// awsRestjson1_deserializeErrorValidationException decodes the error body
// into a typed ValidationException and returns it as the operation error.
func awsRestjson1_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	output := &types.ValidationException{}
	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	body := io.TeeReader(errorBody, ringBuffer)
	decoder := json.NewDecoder(body)
	decoder.UseNumber()
	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	err := awsRestjson1_deserializeDocumentValidationException(&output, shape)

	if err != nil {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		err = &smithy.DeserializationError{
			Err:      fmt.Errorf("failed to decode response body, %w", err),
			Snapshot: snapshot.Bytes(),
		}
		return err
	}

	errorBody.Seek(0, io.SeekStart)

	return output
}

// awsRestjson1_deserializeDocumentAccessDeniedException populates an
// AccessDeniedException from the decoded JSON document shape.
func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}

	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}

	var sv *types.AccessDeniedException
	if *v == nil {
		sv = &types.AccessDeniedException{}
	} else {
		sv = *v
	}

	for key, value := range shape {
		switch key {
		case "message":
			if value != nil {
				jtv, ok := value.(string)
				if !ok {
					return fmt.Errorf("expected String to be of type string, got %T instead", value)
				}
				sv.Message = ptr.String(jtv)
			}

		default:
			// Unknown members are ignored for forward compatibility.
			_, _ = key, value

		}
	}
	*v = sv
	return nil
}

// awsRestjson1_deserializeDocumentBillOfMaterialsImportJob populates a
// BillOfMaterialsImportJob from the decoded JSON document shape.
func awsRestjson1_deserializeDocumentBillOfMaterialsImportJob(v **types.BillOfMaterialsImportJob, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}

	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}

	var sv *types.BillOfMaterialsImportJob
	if *v == nil {
		sv = &types.BillOfMaterialsImportJob{}
	} else {
		sv = *v
	}

	for key, value := range shape {
		switch key {
		case "instanceId":
			if value != nil {
				jtv, ok := value.(string)
				if !ok {
					return fmt.Errorf("expected UUID to be of type string, got %T instead", value)
				}
				sv.InstanceId = ptr.String(jtv)
			}

		case "jobId":
			if value != nil {
				jtv, ok := value.(string)
				if !ok {
					return fmt.Errorf("expected UUID to be of type string, got %T instead", value)
				}
				sv.JobId = ptr.String(jtv)
			}

		case "message":
			if value != nil {
				jtv, ok := value.(string)
				if !ok {
					return fmt.Errorf("expected String to be of type string, got %T instead", value)
				}
				sv.Message = ptr.String(jtv)
			}

		case "s3uri":
			if value != nil {
				jtv, ok := value.(string)
				if !ok {
					return fmt.Errorf("expected ConfigurationS3Uri to be of type string, got %T instead", value)
				}
				sv.S3uri = ptr.String(jtv)
			}

		case "status":
			if value != nil {
				jtv, ok := value.(string)
				if !ok {
					return fmt.Errorf("expected ConfigurationJobStatus to be of type string, got %T instead", value)
				}
				sv.Status = types.ConfigurationJobStatus(jtv)
			}

		default:
			// Unknown members are ignored for forward compatibility.
			_, _ = key, value

		}
	}
	*v = sv
	return nil
}

// awsRestjson1_deserializeDocumentConflictException populates a
// ConflictException from the decoded JSON document shape.
func awsRestjson1_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}

	shape, ok := value.(map[string]interface{})
	if !ok {
		return fmt.Errorf("unexpected JSON type %v", value)
	}

	var sv *types.ConflictException
	if *v == nil {
		sv = &types.ConflictException{}
	} else {
		sv = *v
	}

	for key, value := range shape {
		switch key {
		case "message":
			if value != nil {
				jtv, ok := value.(string)
				if !ok {
					return fmt.Errorf("expected String to be of type string, got %T instead", value)
				}
				sv.Message = ptr.String(jtv)
			}

		default:
			// Unknown members are ignored for forward compatibility.
			_, _ = key, value

		}
	}
	*v = sv
	return nil
}

// NOTE(review): awsRestjson1_deserializeDocumentDataIntegrationFlow continues
// past the end of this chunk; only its opening lines are visible here.
func awsRestjson1_deserializeDocumentDataIntegrationFlow(v **types.DataIntegrationFlow, value interface{}) error {
	if v == nil {
		return fmt.Errorf("unexpected nil of type %T", v)
	}
	if value == nil {
		return nil
	}

	shape, ok := value.(map[string]interface{})
	if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlow + if *v == nil { + sv = &types.DataIntegrationFlow{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "createdTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "instanceId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UUID to be of type string, got %T instead", value) + } + sv.InstanceId = ptr.String(jtv) + } + + case "lastModifiedTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModifiedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowName to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "sources": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowSourceList(&sv.Sources, value); err != nil { + return err + } + + case "target": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowTarget(&sv.Target, value); err != nil { + return err + } + + case "transformation": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowTransformation(&sv.Transformation, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowDatasetOptions(v **types.DataIntegrationFlowDatasetOptions, value interface{}) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlowDatasetOptions + if *v == nil { + sv = &types.DataIntegrationFlowDatasetOptions{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "dedupeRecords": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.DedupeRecords = ptr.Bool(jtv) + } + + case "loadType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowLoadType to be of type string, got %T instead", value) + } + sv.LoadType = types.DataIntegrationFlowLoadType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowDatasetSourceConfiguration(v **types.DataIntegrationFlowDatasetSourceConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlowDatasetSourceConfiguration + if *v == nil { + sv = &types.DataIntegrationFlowDatasetSourceConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "datasetIdentifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DatasetIdentifier to be of type string, got %T instead", value) + } + sv.DatasetIdentifier = ptr.String(jtv) + } + + case "options": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowDatasetOptions(&sv.Options, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsRestjson1_deserializeDocumentDataIntegrationFlowDatasetTargetConfiguration(v **types.DataIntegrationFlowDatasetTargetConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlowDatasetTargetConfiguration + if *v == nil { + sv = &types.DataIntegrationFlowDatasetTargetConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "datasetIdentifier": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DatasetIdentifier to be of type string, got %T instead", value) + } + sv.DatasetIdentifier = ptr.String(jtv) + } + + case "options": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowDatasetOptions(&sv.Options, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowList(v *[]types.DataIntegrationFlow, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DataIntegrationFlow + if *v == nil { + cv = []types.DataIntegrationFlow{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DataIntegrationFlow + destAddr := &col + if err := awsRestjson1_deserializeDocumentDataIntegrationFlow(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowS3Options(v **types.DataIntegrationFlowS3Options, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + 
if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlowS3Options + if *v == nil { + sv = &types.DataIntegrationFlowS3Options{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "fileType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowFileType to be of type string, got %T instead", value) + } + sv.FileType = types.DataIntegrationFlowFileType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowS3SourceConfiguration(v **types.DataIntegrationFlowS3SourceConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlowS3SourceConfiguration + if *v == nil { + sv = &types.DataIntegrationFlowS3SourceConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "bucketName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3BucketName to be of type string, got %T instead", value) + } + sv.BucketName = ptr.String(jtv) + } + + case "options": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowS3Options(&sv.Options, value); err != nil { + return err + } + + case "prefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowS3Prefix to be of type string, got %T instead", value) + } + sv.Prefix = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowS3TargetConfiguration(v 
**types.DataIntegrationFlowS3TargetConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlowS3TargetConfiguration + if *v == nil { + sv = &types.DataIntegrationFlowS3TargetConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "bucketName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected S3BucketName to be of type string, got %T instead", value) + } + sv.BucketName = ptr.String(jtv) + } + + case "options": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowS3Options(&sv.Options, value); err != nil { + return err + } + + case "prefix": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowS3Prefix to be of type string, got %T instead", value) + } + sv.Prefix = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowSource(v **types.DataIntegrationFlowSource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlowSource + if *v == nil { + sv = &types.DataIntegrationFlowSource{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "datasetSource": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowDatasetSourceConfiguration(&sv.DatasetSource, value); err != nil { + return err + } + + case "s3Source": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowS3SourceConfiguration(&sv.S3Source, value); err != 
nil { + return err + } + + case "sourceName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowSourceName to be of type string, got %T instead", value) + } + sv.SourceName = ptr.String(jtv) + } + + case "sourceType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowSourceType to be of type string, got %T instead", value) + } + sv.SourceType = types.DataIntegrationFlowSourceType(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowSourceList(v *[]types.DataIntegrationFlowSource, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DataIntegrationFlowSource + if *v == nil { + cv = []types.DataIntegrationFlowSource{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DataIntegrationFlowSource + destAddr := &col + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowSource(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentDataIntegrationFlowSQLTransformationConfiguration(v **types.DataIntegrationFlowSQLTransformationConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataIntegrationFlowSQLTransformationConfiguration + if *v == nil { + sv = &types.DataIntegrationFlowSQLTransformationConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "query": + if 
value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowSQLQuery to be of type string, got %T instead", value) + } + sv.Query = ptr.String(jtv) + } + + default: + _, _ = key, value - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), } - return err } + *v = sv + return nil +} - err := awsRestjson1_deserializeDocumentThrottlingException(&output, shape) +func awsRestjson1_deserializeDocumentDataIntegrationFlowTarget(v **types.DataIntegrationFlowTarget, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) } - errorBody.Seek(0, io.SeekStart) + var sv *types.DataIntegrationFlowTarget + if *v == nil { + sv = &types.DataIntegrationFlowTarget{} + } else { + sv = *v + } - return output -} + for key, value := range shape { + switch key { + case "datasetTarget": + if err := awsRestjson1_deserializeDocumentDataIntegrationFlowDatasetTargetConfiguration(&sv.DatasetTarget, value); err != nil { + return err + } -func awsRestjson1_deserializeErrorValidationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ValidationException{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) + case "s3Target": + if err := 
awsRestjson1_deserializeDocumentDataIntegrationFlowS3TargetConfiguration(&sv.S3Target, value); err != nil { + return err + } - body := io.TeeReader(errorBody, ringBuffer) - decoder := json.NewDecoder(body) - decoder.UseNumber() - var shape interface{} - if err := decoder.Decode(&shape); err != nil && err != io.EOF { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - return err - } + case "targetType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataIntegrationFlowTargetType to be of type string, got %T instead", value) + } + sv.TargetType = types.DataIntegrationFlowTargetType(jtv) + } - err := awsRestjson1_deserializeDocumentValidationException(&output, shape) + default: + _, _ = key, value - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - err = &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), } - return err } - - errorBody.Seek(0, io.SeekStart) - - return output + *v = sv + return nil } -func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { +func awsRestjson1_deserializeDocumentDataIntegrationFlowTransformation(v **types.DataIntegrationFlowTransformation, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -812,22 +3716,27 @@ func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDenie return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.AccessDeniedException + var sv *types.DataIntegrationFlowTransformation if *v == nil { - sv = &types.AccessDeniedException{} + sv = &types.DataIntegrationFlowTransformation{} } else { sv = *v } for key, value := range shape { switch key { - case "message": + case "sqlTransformation": + if err := 
awsRestjson1_deserializeDocumentDataIntegrationFlowSQLTransformationConfiguration(&sv.SqlTransformation, value); err != nil { + return err + } + + case "transformationType": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected DataIntegrationFlowTransformationType to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.TransformationType = types.DataIntegrationFlowTransformationType(jtv) } default: @@ -839,7 +3748,7 @@ func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDenie return nil } -func awsRestjson1_deserializeDocumentBillOfMaterialsImportJob(v **types.BillOfMaterialsImportJob, value interface{}) error { +func awsRestjson1_deserializeDocumentDataLakeDataset(v **types.DataLakeDataset, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -852,58 +3761,174 @@ func awsRestjson1_deserializeDocumentBillOfMaterialsImportJob(v **types.BillOfMa return fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.BillOfMaterialsImportJob + var sv *types.DataLakeDataset if *v == nil { - sv = &types.BillOfMaterialsImportJob{} + sv = &types.DataLakeDataset{} } else { sv = *v } for key, value := range shape { switch key { - case "instanceId": + case "arn": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected UUID to be of type string, got %T instead", value) + return fmt.Errorf("expected AscResourceArn to be of type string, got %T instead", value) } - sv.InstanceId = ptr.String(jtv) + sv.Arn = ptr.String(jtv) } - case "jobId": + case "createdTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreatedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) 
+ + } + } + + case "description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataLakeDatasetDescription to be of type string, got %T instead", value) + } + sv.Description = ptr.String(jtv) + } + + case "instanceId": if value != nil { jtv, ok := value.(string) if !ok { return fmt.Errorf("expected UUID to be of type string, got %T instead", value) } - sv.JobId = ptr.String(jtv) + sv.InstanceId = ptr.String(jtv) } - case "message": + case "lastModifiedTime": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastModifiedTime = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected Timestamp to be a JSON Number, got %T instead", value) + + } + } + + case "name": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected DataLakeDatasetName to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.Name = ptr.String(jtv) } - case "s3uri": + case "namespace": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ConfigurationS3Uri to be of type string, got %T instead", value) + return fmt.Errorf("expected DataLakeDatasetNamespace to be of type string, got %T instead", value) } - sv.S3uri = ptr.String(jtv) + sv.Namespace = ptr.String(jtv) } - case "status": + case "schema": + if err := awsRestjson1_deserializeDocumentDataLakeDatasetSchema(&sv.Schema, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentDataLakeDatasetList(v *[]types.DataLakeDataset, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return 
fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DataLakeDataset + if *v == nil { + cv = []types.DataLakeDataset{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DataLakeDataset + destAddr := &col + if err := awsRestjson1_deserializeDocumentDataLakeDataset(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsRestjson1_deserializeDocumentDataLakeDatasetSchema(v **types.DataLakeDatasetSchema, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.DataLakeDatasetSchema + if *v == nil { + sv = &types.DataLakeDatasetSchema{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "fields": + if err := awsRestjson1_deserializeDocumentDataLakeDatasetSchemaFieldList(&sv.Fields, value); err != nil { + return err + } + + case "name": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected ConfigurationJobStatus to be of type string, got %T instead", value) + return fmt.Errorf("expected DataLakeDatasetSchemaName to be of type string, got %T instead", value) } - sv.Status = types.ConfigurationJobStatus(jtv) + sv.Name = ptr.String(jtv) } default: @@ -915,7 +3940,7 @@ func awsRestjson1_deserializeDocumentBillOfMaterialsImportJob(v **types.BillOfMa return nil } -func awsRestjson1_deserializeDocumentConflictException(v **types.ConflictException, value interface{}) error { +func awsRestjson1_deserializeDocumentDataLakeDatasetSchemaField(v **types.DataLakeDatasetSchemaField, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } @@ -928,22 +3953,40 @@ func awsRestjson1_deserializeDocumentConflictException(v **types.ConflictExcepti return 
fmt.Errorf("unexpected JSON type %v", value) } - var sv *types.ConflictException + var sv *types.DataLakeDatasetSchemaField if *v == nil { - sv = &types.ConflictException{} + sv = &types.DataLakeDatasetSchemaField{} } else { sv = *v } for key, value := range shape { switch key { - case "message": + case "isRequired": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.IsRequired = ptr.Bool(jtv) + } + + case "name": if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected DataLakeDatasetSchemaFieldName to be of type string, got %T instead", value) } - sv.Message = ptr.String(jtv) + sv.Name = ptr.String(jtv) + } + + case "type": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DataLakeDatasetSchemaFieldType to be of type string, got %T instead", value) + } + sv.Type = types.DataLakeDatasetSchemaFieldType(jtv) } default: @@ -955,6 +3998,40 @@ func awsRestjson1_deserializeDocumentConflictException(v **types.ConflictExcepti return nil } +func awsRestjson1_deserializeDocumentDataLakeDatasetSchemaFieldList(v *[]types.DataLakeDatasetSchemaField, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.DataLakeDatasetSchemaField + if *v == nil { + cv = []types.DataLakeDatasetSchemaField{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.DataLakeDatasetSchemaField + destAddr := &col + if err := awsRestjson1_deserializeDocumentDataLakeDatasetSchemaField(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + func 
awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -1075,6 +4152,42 @@ func awsRestjson1_deserializeDocumentServiceQuotaExceededException(v **types.Ser return nil } +func awsRestjson1_deserializeDocumentTagMap(v *map[string]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var mv map[string]string + if *v == nil { + mv = map[string]string{} + } else { + mv = *v + } + + for key, value := range shape { + var parsedVal string + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TagValue to be of type string, got %T instead", value) + } + parsedVal = jtv + } + mv[key] = parsedVal + + } + *v = mv + return nil +} + func awsRestjson1_deserializeDocumentThrottlingException(v **types.ThrottlingException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/supplychain/generated.json b/service/supplychain/generated.json index a6af8ce99b2..41e8a625573 100644 --- a/service/supplychain/generated.json +++ b/service/supplychain/generated.json @@ -9,8 +9,21 @@ "api_client.go", "api_client_test.go", "api_op_CreateBillOfMaterialsImportJob.go", + "api_op_CreateDataIntegrationFlow.go", + "api_op_CreateDataLakeDataset.go", + "api_op_DeleteDataIntegrationFlow.go", + "api_op_DeleteDataLakeDataset.go", "api_op_GetBillOfMaterialsImportJob.go", + "api_op_GetDataIntegrationFlow.go", + "api_op_GetDataLakeDataset.go", + "api_op_ListDataIntegrationFlows.go", + "api_op_ListDataLakeDatasets.go", + "api_op_ListTagsForResource.go", "api_op_SendDataIntegrationEvent.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + 
"api_op_UpdateDataIntegrationFlow.go", + "api_op_UpdateDataLakeDataset.go", "auth.go", "deserializers.go", "doc.go", diff --git a/service/supplychain/serializers.go b/service/supplychain/serializers.go index 452e8dac0ae..fc2ef1354c8 100644 --- a/service/supplychain/serializers.go +++ b/service/supplychain/serializers.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "fmt" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/encoding/httpbinding" smithyjson "github.com/aws/smithy-go/encoding/json" @@ -114,14 +115,1209 @@ func awsRestjson1_serializeOpDocumentCreateBillOfMaterialsImportJobInput(v *Crea return nil } +type awsRestjson1_serializeOpCreateDataIntegrationFlow struct { +} + +func (*awsRestjson1_serializeOpCreateDataIntegrationFlow) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateDataIntegrationFlow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateDataIntegrationFlowInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/data-integration/instance/{instanceId}/data-integration-flows/{name}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if 
request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsCreateDataIntegrationFlowInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateDataIntegrationFlowInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateDataIntegrationFlowInput(v *CreateDataIntegrationFlowInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.Name == nil || len(*v.Name) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member name must not be empty")} + } + if v.Name != nil { + if err := 
encoder.SetURI("name").String(*v.Name); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateDataIntegrationFlowInput(v *CreateDataIntegrationFlowInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Sources != nil { + ok := object.Key("sources") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowSourceList(v.Sources, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsRestjson1_serializeDocumentTagMap(v.Tags, ok); err != nil { + return err + } + } + + if v.Target != nil { + ok := object.Key("target") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowTarget(v.Target, ok); err != nil { + return err + } + } + + if v.Transformation != nil { + ok := object.Key("transformation") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowTransformation(v.Transformation, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpCreateDataLakeDataset struct { +} + +func (*awsRestjson1_serializeOpCreateDataLakeDataset) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateDataLakeDataset) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateDataLakeDatasetInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := 
httpbinding.SplitURI("/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "PUT" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsCreateDataLakeDatasetInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateDataLakeDatasetInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateDataLakeDatasetInput(v *CreateDataLakeDatasetInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if 
v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.Name == nil || len(*v.Name) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member name must not be empty")} + } + if v.Name != nil { + if err := encoder.SetURI("name").String(*v.Name); err != nil { + return err + } + } + + if v.Namespace == nil || len(*v.Namespace) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member namespace must not be empty")} + } + if v.Namespace != nil { + if err := encoder.SetURI("namespace").String(*v.Namespace); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateDataLakeDatasetInput(v *CreateDataLakeDatasetInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("description") + ok.String(*v.Description) + } + + if v.Schema != nil { + ok := object.Key("schema") + if err := awsRestjson1_serializeDocumentDataLakeDatasetSchema(v.Schema, ok); err != nil { + return err + } + } + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsRestjson1_serializeDocumentTagMap(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpDeleteDataIntegrationFlow struct { +} + +func (*awsRestjson1_serializeOpDeleteDataIntegrationFlow) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteDataIntegrationFlow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: 
fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteDataIntegrationFlowInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/data-integration/instance/{instanceId}/data-integration-flows/{name}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsDeleteDataIntegrationFlowInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteDataIntegrationFlowInput(v *DeleteDataIntegrationFlowInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.Name == nil || 
len(*v.Name) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member name must not be empty")} + } + if v.Name != nil { + if err := encoder.SetURI("name").String(*v.Name); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpDeleteDataLakeDataset struct { +} + +func (*awsRestjson1_serializeOpDeleteDataLakeDataset) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpDeleteDataLakeDataset) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteDataLakeDatasetInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := 
awsRestjson1_serializeOpHttpBindingsDeleteDataLakeDatasetInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsDeleteDataLakeDatasetInput(v *DeleteDataLakeDatasetInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.Name == nil || len(*v.Name) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member name must not be empty")} + } + if v.Name != nil { + if err := encoder.SetURI("name").String(*v.Name); err != nil { + return err + } + } + + if v.Namespace == nil || len(*v.Namespace) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member namespace must not be empty")} + } + if v.Namespace != nil { + if err := encoder.SetURI("namespace").String(*v.Namespace); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpGetBillOfMaterialsImportJob struct { } -func (*awsRestjson1_serializeOpGetBillOfMaterialsImportJob) ID() string { +func (*awsRestjson1_serializeOpGetBillOfMaterialsImportJob) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetBillOfMaterialsImportJob) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, 
"OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetBillOfMaterialsImportJobInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/configuration/instances/{instanceId}/bill-of-materials-import-jobs/{jobId}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetBillOfMaterialsImportJobInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetBillOfMaterialsImportJobInput(v *GetBillOfMaterialsImportJobInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) 
== 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.JobId == nil || len(*v.JobId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member jobId must not be empty")} + } + if v.JobId != nil { + if err := encoder.SetURI("jobId").String(*v.JobId); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetDataIntegrationFlow struct { +} + +func (*awsRestjson1_serializeOpGetDataIntegrationFlow) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetDataIntegrationFlow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDataIntegrationFlowInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/data-integration/instance/{instanceId}/data-integration-flows/{name}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = 
smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetDataIntegrationFlowInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetDataIntegrationFlowInput(v *GetDataIntegrationFlowInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.Name == nil || len(*v.Name) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member name must not be empty")} + } + if v.Name != nil { + if err := encoder.SetURI("name").String(*v.Name); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpGetDataLakeDataset struct { +} + +func (*awsRestjson1_serializeOpGetDataLakeDataset) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpGetDataLakeDataset) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer 
endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetDataLakeDatasetInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsGetDataLakeDatasetInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsGetDataLakeDatasetInput(v *GetDataLakeDatasetInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if v.InstanceId != nil { + if err 
:= encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.Name == nil || len(*v.Name) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member name must not be empty")} + } + if v.Name != nil { + if err := encoder.SetURI("name").String(*v.Name); err != nil { + return err + } + } + + if v.Namespace == nil || len(*v.Namespace) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member namespace must not be empty")} + } + if v.Namespace != nil { + if err := encoder.SetURI("namespace").String(*v.Namespace); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpListDataIntegrationFlows struct { +} + +func (*awsRestjson1_serializeOpListDataIntegrationFlows) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListDataIntegrationFlows) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListDataIntegrationFlowsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/data-integration/instance/{instanceId}/data-integration-flows") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = 
httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListDataIntegrationFlowsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListDataIntegrationFlowsInput(v *ListDataIntegrationFlowsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.MaxResults != nil { + encoder.SetQuery("maxResults").Integer(*v.MaxResults) + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListDataLakeDatasets struct { +} + +func (*awsRestjson1_serializeOpListDataLakeDatasets) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListDataLakeDatasets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := 
startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListDataLakeDatasetsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListDataLakeDatasetsInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListDataLakeDatasetsInput(v *ListDataLakeDatasetsInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member 
instanceId must not be empty")} + } + if v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + if v.MaxResults != nil { + encoder.SetQuery("maxResults").Integer(*v.MaxResults) + } + + if v.Namespace == nil || len(*v.Namespace) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member namespace must not be empty")} + } + if v.Namespace != nil { + if err := encoder.SetURI("namespace").String(*v.Namespace); err != nil { + return err + } + } + + if v.NextToken != nil { + encoder.SetQuery("nextToken").String(*v.NextToken) + } + + return nil +} + +type awsRestjson1_serializeOpListTagsForResource struct { +} + +func (*awsRestjson1_serializeOpListTagsForResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpListTagsForResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListTagsForResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/tags/{resourceArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, 
request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsListTagsForResourceInput(v *ListTagsForResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ResourceArn == nil || len(*v.ResourceArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")} + } + if v.ResourceArn != nil { + if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpSendDataIntegrationEvent struct { +} + +func (*awsRestjson1_serializeOpSendDataIntegrationEvent) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpSendDataIntegrationEvent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport 
type %T", in.Request)} + } + + input, ok := in.Parameters.(*SendDataIntegrationEventInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api-data/data-integration/instance/{instanceId}/data-integration-events") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsSendDataIntegrationEventInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentSendDataIntegrationEventInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsSendDataIntegrationEventInput(v *SendDataIntegrationEventInput, encoder 
*httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.InstanceId == nil || len(*v.InstanceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member instanceId must not be empty")} + } + if v.InstanceId != nil { + if err := encoder.SetURI("instanceId").String(*v.InstanceId); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentSendDataIntegrationEventInput(v *SendDataIntegrationEventInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientToken != nil { + ok := object.Key("clientToken") + ok.String(*v.ClientToken) + } + + if v.Data != nil { + ok := object.Key("data") + ok.String(*v.Data) + } + + if v.EventGroupId != nil { + ok := object.Key("eventGroupId") + ok.String(*v.EventGroupId) + } + + if v.EventTimestamp != nil { + ok := object.Key("eventTimestamp") + ok.Double(smithytime.FormatEpochSeconds(*v.EventTimestamp)) + } + + if len(v.EventType) > 0 { + ok := object.Key("eventType") + ok.String(string(v.EventType)) + } + + return nil +} + +type awsRestjson1_serializeOpTagResource struct { +} + +func (*awsRestjson1_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown 
input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/tags/{resourceArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsTagResourceInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsTagResourceInput(v *TagResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ResourceArn == nil || len(*v.ResourceArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")} + } + if v.ResourceArn != nil { + 
if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Tags != nil { + ok := object.Key("tags") + if err := awsRestjson1_serializeDocumentTagMap(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpUntagResource struct { +} + +func (*awsRestjson1_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/api/tags/{resourceArn}") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, 
request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsUntagResourceInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsUntagResourceInput(v *UntagResourceInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ResourceArn == nil || len(*v.ResourceArn) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member resourceArn must not be empty")} + } + if v.ResourceArn != nil { + if err := encoder.SetURI("resourceArn").String(*v.ResourceArn); err != nil { + return err + } + } + + if v.TagKeys != nil { + for i := range v.TagKeys { + encoder.AddQuery("tagKeys").String(v.TagKeys[i]) + } + } + + return nil +} + +type awsRestjson1_serializeOpUpdateDataIntegrationFlow struct { +} + +func (*awsRestjson1_serializeOpUpdateDataIntegrationFlow) ID() string { return "OperationSerializer" } -func (m *awsRestjson1_serializeOpGetBillOfMaterialsImportJob) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsRestjson1_serializeOpUpdateDataIntegrationFlow) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -133,16 +1329,16 @@ func (m *awsRestjson1_serializeOpGetBillOfMaterialsImportJob) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, 
ok := in.Parameters.(*GetBillOfMaterialsImportJobInput) + input, ok := in.Parameters.(*UpdateDataIntegrationFlowInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/api/configuration/instances/{instanceId}/bill-of-materials-import-jobs/{jobId}") + opPath, opQuery := httpbinding.SplitURI("/api/data-integration/instance/{instanceId}/data-integration-flows/{name}") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "GET" + request.Method = "PATCH" var restEncoder *httpbinding.Encoder if request.URL.RawPath == "" { restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) @@ -155,7 +1351,18 @@ func (m *awsRestjson1_serializeOpGetBillOfMaterialsImportJob) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: err} } - if err := awsRestjson1_serializeOpHttpBindingsGetBillOfMaterialsImportJobInput(input, restEncoder); err != nil { + if err := awsRestjson1_serializeOpHttpBindingsUpdateDataIntegrationFlowInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentUpdateDataIntegrationFlowInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -168,7 +1375,7 @@ func (m *awsRestjson1_serializeOpGetBillOfMaterialsImportJob) HandleSerialize(ct span.End() return next.HandleSerialize(ctx, in) } -func awsRestjson1_serializeOpHttpBindingsGetBillOfMaterialsImportJobInput(v 
*GetBillOfMaterialsImportJobInput, encoder *httpbinding.Encoder) error { +func awsRestjson1_serializeOpHttpBindingsUpdateDataIntegrationFlowInput(v *UpdateDataIntegrationFlowInput, encoder *httpbinding.Encoder) error { if v == nil { return fmt.Errorf("unsupported serialization of nil %T", v) } @@ -182,11 +1389,11 @@ func awsRestjson1_serializeOpHttpBindingsGetBillOfMaterialsImportJobInput(v *Get } } - if v.JobId == nil || len(*v.JobId) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member jobId must not be empty")} + if v.Name == nil || len(*v.Name) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member name must not be empty")} } - if v.JobId != nil { - if err := encoder.SetURI("jobId").String(*v.JobId); err != nil { + if v.Name != nil { + if err := encoder.SetURI("name").String(*v.Name); err != nil { return err } } @@ -194,14 +1401,42 @@ func awsRestjson1_serializeOpHttpBindingsGetBillOfMaterialsImportJobInput(v *Get return nil } -type awsRestjson1_serializeOpSendDataIntegrationEvent struct { +func awsRestjson1_serializeOpDocumentUpdateDataIntegrationFlowInput(v *UpdateDataIntegrationFlowInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Sources != nil { + ok := object.Key("sources") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowSourceList(v.Sources, ok); err != nil { + return err + } + } + + if v.Target != nil { + ok := object.Key("target") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowTarget(v.Target, ok); err != nil { + return err + } + } + + if v.Transformation != nil { + ok := object.Key("transformation") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowTransformation(v.Transformation, ok); err != nil { + return err + } + } + + return nil } -func (*awsRestjson1_serializeOpSendDataIntegrationEvent) ID() string { +type awsRestjson1_serializeOpUpdateDataLakeDataset struct { +} + +func (*awsRestjson1_serializeOpUpdateDataLakeDataset) ID() 
string { return "OperationSerializer" } -func (m *awsRestjson1_serializeOpSendDataIntegrationEvent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( +func (m *awsRestjson1_serializeOpUpdateDataLakeDataset) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { _, span := tracing.StartSpan(ctx, "OperationSerializer") @@ -213,16 +1448,16 @@ func (m *awsRestjson1_serializeOpSendDataIntegrationEvent) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} } - input, ok := in.Parameters.(*SendDataIntegrationEventInput) + input, ok := in.Parameters.(*UpdateDataLakeDatasetInput) _ = input if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/api-data/data-integration/instance/{instanceId}/data-integration-events") + opPath, opQuery := httpbinding.SplitURI("/api/datalake/instance/{instanceId}/namespaces/{namespace}/datasets/{name}") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) - request.Method = "POST" + request.Method = "PATCH" var restEncoder *httpbinding.Encoder if request.URL.RawPath == "" { restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) @@ -235,14 +1470,14 @@ func (m *awsRestjson1_serializeOpSendDataIntegrationEvent) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: err} } - if err := awsRestjson1_serializeOpHttpBindingsSendDataIntegrationEventInput(input, restEncoder); err != nil { + if err := awsRestjson1_serializeOpHttpBindingsUpdateDataLakeDatasetInput(input, restEncoder); err != nil { return out, metadata, 
&smithy.SerializationError{Err: err} } restEncoder.SetHeader("Content-Type").String("application/json") jsonEncoder := smithyjson.NewEncoder() - if err := awsRestjson1_serializeOpDocumentSendDataIntegrationEventInput(input, jsonEncoder.Value); err != nil { + if err := awsRestjson1_serializeOpDocumentUpdateDataLakeDatasetInput(input, jsonEncoder.Value); err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -259,7 +1494,7 @@ func (m *awsRestjson1_serializeOpSendDataIntegrationEvent) HandleSerialize(ctx c span.End() return next.HandleSerialize(ctx, in) } -func awsRestjson1_serializeOpHttpBindingsSendDataIntegrationEventInput(v *SendDataIntegrationEventInput, encoder *httpbinding.Encoder) error { +func awsRestjson1_serializeOpHttpBindingsUpdateDataLakeDatasetInput(v *UpdateDataLakeDatasetInput, encoder *httpbinding.Encoder) error { if v == nil { return fmt.Errorf("unsupported serialization of nil %T", v) } @@ -273,37 +1508,316 @@ func awsRestjson1_serializeOpHttpBindingsSendDataIntegrationEventInput(v *SendDa } } + if v.Name == nil || len(*v.Name) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member name must not be empty")} + } + if v.Name != nil { + if err := encoder.SetURI("name").String(*v.Name); err != nil { + return err + } + } + + if v.Namespace == nil || len(*v.Namespace) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member namespace must not be empty")} + } + if v.Namespace != nil { + if err := encoder.SetURI("namespace").String(*v.Namespace); err != nil { + return err + } + } + return nil } -func awsRestjson1_serializeOpDocumentSendDataIntegrationEventInput(v *SendDataIntegrationEventInput, value smithyjson.Value) error { +func awsRestjson1_serializeOpDocumentUpdateDataLakeDatasetInput(v *UpdateDataLakeDatasetInput, value smithyjson.Value) error { object := value.Object() defer object.Close() - if v.ClientToken != nil { - ok := object.Key("clientToken") - ok.String(*v.ClientToken) + if 
v.Description != nil { + ok := object.Key("description") + ok.String(*v.Description) } - if v.Data != nil { - ok := object.Key("data") - ok.String(*v.Data) + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowDatasetOptions(v *types.DataIntegrationFlowDatasetOptions, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DedupeRecords != nil { + ok := object.Key("dedupeRecords") + ok.Boolean(*v.DedupeRecords) } - if v.EventGroupId != nil { - ok := object.Key("eventGroupId") - ok.String(*v.EventGroupId) + if len(v.LoadType) > 0 { + ok := object.Key("loadType") + ok.String(string(v.LoadType)) } - if v.EventTimestamp != nil { - ok := object.Key("eventTimestamp") - ok.Double(smithytime.FormatEpochSeconds(*v.EventTimestamp)) + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowDatasetSourceConfiguration(v *types.DataIntegrationFlowDatasetSourceConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DatasetIdentifier != nil { + ok := object.Key("datasetIdentifier") + ok.String(*v.DatasetIdentifier) } - if len(v.EventType) > 0 { - ok := object.Key("eventType") - ok.String(string(v.EventType)) + if v.Options != nil { + ok := object.Key("options") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowDatasetOptions(v.Options, ok); err != nil { + return err + } + } + + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowDatasetTargetConfiguration(v *types.DataIntegrationFlowDatasetTargetConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DatasetIdentifier != nil { + ok := object.Key("datasetIdentifier") + ok.String(*v.DatasetIdentifier) + } + + if v.Options != nil { + ok := object.Key("options") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowDatasetOptions(v.Options, ok); err != nil { + return err + } + } + + return nil +} + +func 
awsRestjson1_serializeDocumentDataIntegrationFlowS3Options(v *types.DataIntegrationFlowS3Options, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.FileType) > 0 { + ok := object.Key("fileType") + ok.String(string(v.FileType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowS3SourceConfiguration(v *types.DataIntegrationFlowS3SourceConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BucketName != nil { + ok := object.Key("bucketName") + ok.String(*v.BucketName) + } + + if v.Options != nil { + ok := object.Key("options") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowS3Options(v.Options, ok); err != nil { + return err + } + } + + if v.Prefix != nil { + ok := object.Key("prefix") + ok.String(*v.Prefix) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowS3TargetConfiguration(v *types.DataIntegrationFlowS3TargetConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BucketName != nil { + ok := object.Key("bucketName") + ok.String(*v.BucketName) + } + + if v.Options != nil { + ok := object.Key("options") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowS3Options(v.Options, ok); err != nil { + return err + } + } + + if v.Prefix != nil { + ok := object.Key("prefix") + ok.String(*v.Prefix) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowSource(v *types.DataIntegrationFlowSource, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DatasetSource != nil { + ok := object.Key("datasetSource") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowDatasetSourceConfiguration(v.DatasetSource, ok); err != nil { + return err + } + } + + if v.S3Source != nil { + ok := object.Key("s3Source") + if err := 
awsRestjson1_serializeDocumentDataIntegrationFlowS3SourceConfiguration(v.S3Source, ok); err != nil { + return err + } + } + + if v.SourceName != nil { + ok := object.Key("sourceName") + ok.String(*v.SourceName) + } + + if len(v.SourceType) > 0 { + ok := object.Key("sourceType") + ok.String(string(v.SourceType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowSourceList(v []types.DataIntegrationFlowSource, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentDataIntegrationFlowSource(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowSQLTransformationConfiguration(v *types.DataIntegrationFlowSQLTransformationConfiguration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Query != nil { + ok := object.Key("query") + ok.String(*v.Query) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowTarget(v *types.DataIntegrationFlowTarget, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.DatasetTarget != nil { + ok := object.Key("datasetTarget") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowDatasetTargetConfiguration(v.DatasetTarget, ok); err != nil { + return err + } + } + + if v.S3Target != nil { + ok := object.Key("s3Target") + if err := awsRestjson1_serializeDocumentDataIntegrationFlowS3TargetConfiguration(v.S3Target, ok); err != nil { + return err + } + } + + if len(v.TargetType) > 0 { + ok := object.Key("targetType") + ok.String(string(v.TargetType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataIntegrationFlowTransformation(v *types.DataIntegrationFlowTransformation, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.SqlTransformation != nil { + ok := object.Key("sqlTransformation") 
+ if err := awsRestjson1_serializeDocumentDataIntegrationFlowSQLTransformationConfiguration(v.SqlTransformation, ok); err != nil { + return err + } + } + + if len(v.TransformationType) > 0 { + ok := object.Key("transformationType") + ok.String(string(v.TransformationType)) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataLakeDatasetSchema(v *types.DataLakeDatasetSchema, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Fields != nil { + ok := object.Key("fields") + if err := awsRestjson1_serializeDocumentDataLakeDatasetSchemaFieldList(v.Fields, ok); err != nil { + return err + } + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataLakeDatasetSchemaField(v *types.DataLakeDatasetSchemaField, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.IsRequired != nil { + ok := object.Key("isRequired") + ok.Boolean(*v.IsRequired) + } + + if v.Name != nil { + ok := object.Key("name") + ok.String(*v.Name) } + if len(v.Type) > 0 { + ok := object.Key("type") + ok.String(string(v.Type)) + } + + return nil +} + +func awsRestjson1_serializeDocumentDataLakeDatasetSchemaFieldList(v []types.DataLakeDatasetSchemaField, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsRestjson1_serializeDocumentDataLakeDatasetSchemaField(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsRestjson1_serializeDocumentTagMap(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } return nil } diff --git a/service/supplychain/snapshot/api_op_CreateDataIntegrationFlow.go.snap b/service/supplychain/snapshot/api_op_CreateDataIntegrationFlow.go.snap new file mode 100644 index 00000000000..b16cbdc9d8e --- 
/dev/null +++ b/service/supplychain/snapshot/api_op_CreateDataIntegrationFlow.go.snap @@ -0,0 +1,41 @@ +CreateDataIntegrationFlow + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_CreateDataLakeDataset.go.snap b/service/supplychain/snapshot/api_op_CreateDataLakeDataset.go.snap new file mode 100644 index 00000000000..2d1c7079256 --- /dev/null +++ b/service/supplychain/snapshot/api_op_CreateDataLakeDataset.go.snap @@ -0,0 +1,41 @@ +CreateDataLakeDataset + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + 
CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_DeleteDataIntegrationFlow.go.snap b/service/supplychain/snapshot/api_op_DeleteDataIntegrationFlow.go.snap new file mode 100644 index 00000000000..79316bb6df9 --- /dev/null +++ b/service/supplychain/snapshot/api_op_DeleteDataIntegrationFlow.go.snap @@ -0,0 +1,41 @@ +DeleteDataIntegrationFlow + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_DeleteDataLakeDataset.go.snap b/service/supplychain/snapshot/api_op_DeleteDataLakeDataset.go.snap new file mode 100644 index 00000000000..73790d292c2 --- /dev/null +++ b/service/supplychain/snapshot/api_op_DeleteDataLakeDataset.go.snap @@ -0,0 +1,41 @@ +DeleteDataLakeDataset + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength 
+ UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_GetDataIntegrationFlow.go.snap b/service/supplychain/snapshot/api_op_GetDataIntegrationFlow.go.snap new file mode 100644 index 00000000000..7fb81fbb18b --- /dev/null +++ b/service/supplychain/snapshot/api_op_GetDataIntegrationFlow.go.snap @@ -0,0 +1,41 @@ +GetDataIntegrationFlow + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_GetDataLakeDataset.go.snap b/service/supplychain/snapshot/api_op_GetDataLakeDataset.go.snap new file mode 100644 index 00000000000..06be4ac7064 --- /dev/null +++ b/service/supplychain/snapshot/api_op_GetDataLakeDataset.go.snap @@ -0,0 +1,41 @@ 
+GetDataLakeDataset + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_ListDataIntegrationFlows.go.snap b/service/supplychain/snapshot/api_op_ListDataIntegrationFlows.go.snap new file mode 100644 index 00000000000..fdcc71e15a7 --- /dev/null +++ b/service/supplychain/snapshot/api_op_ListDataIntegrationFlows.go.snap @@ -0,0 +1,41 @@ +ListDataIntegrationFlows + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + 
AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_ListDataLakeDatasets.go.snap b/service/supplychain/snapshot/api_op_ListDataLakeDatasets.go.snap new file mode 100644 index 00000000000..c72e430b4a1 --- /dev/null +++ b/service/supplychain/snapshot/api_op_ListDataLakeDatasets.go.snap @@ -0,0 +1,41 @@ +ListDataLakeDatasets + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_ListTagsForResource.go.snap b/service/supplychain/snapshot/api_op_ListTagsForResource.go.snap new file mode 100644 index 00000000000..071d3ac4e96 --- /dev/null +++ b/service/supplychain/snapshot/api_op_ListTagsForResource.go.snap @@ -0,0 +1,41 @@ +ListTagsForResource + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + 
ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_TagResource.go.snap b/service/supplychain/snapshot/api_op_TagResource.go.snap new file mode 100644 index 00000000000..ae6f8e0846c --- /dev/null +++ b/service/supplychain/snapshot/api_op_TagResource.go.snap @@ -0,0 +1,41 @@ +TagResource + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_UntagResource.go.snap b/service/supplychain/snapshot/api_op_UntagResource.go.snap new file mode 100644 index 00000000000..c7bbe038d98 --- /dev/null +++ b/service/supplychain/snapshot/api_op_UntagResource.go.snap @@ -0,0 +1,41 @@ +UntagResource + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd 
+ Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_UpdateDataIntegrationFlow.go.snap b/service/supplychain/snapshot/api_op_UpdateDataIntegrationFlow.go.snap new file mode 100644 index 00000000000..421c9b12e76 --- /dev/null +++ b/service/supplychain/snapshot/api_op_UpdateDataIntegrationFlow.go.snap @@ -0,0 +1,41 @@ +UpdateDataIntegrationFlow + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot/api_op_UpdateDataLakeDataset.go.snap 
b/service/supplychain/snapshot/api_op_UpdateDataLakeDataset.go.snap new file mode 100644 index 00000000000..ef23e79fe04 --- /dev/null +++ b/service/supplychain/snapshot/api_op_UpdateDataLakeDataset.go.snap @@ -0,0 +1,41 @@ +UpdateDataLakeDataset + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/supplychain/snapshot_test.go b/service/supplychain/snapshot_test.go index 0e5702e782b..2e053bae466 100644 --- a/service/supplychain/snapshot_test.go +++ b/service/supplychain/snapshot_test.go @@ -74,6 +74,54 @@ func TestCheckSnapshot_CreateBillOfMaterialsImportJob(t *testing.T) { } } +func TestCheckSnapshot_CreateDataIntegrationFlow(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateDataIntegrationFlow(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CreateDataIntegrationFlow") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_CreateDataLakeDataset(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateDataLakeDataset(context.Background(), nil, func(o *Options) { + o.APIOptions = 
append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "CreateDataLakeDataset") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteDataIntegrationFlow(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteDataIntegrationFlow(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteDataIntegrationFlow") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_DeleteDataLakeDataset(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteDataLakeDataset(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "DeleteDataLakeDataset") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_GetBillOfMaterialsImportJob(t *testing.T) { svc := New(Options{}) _, err := svc.GetBillOfMaterialsImportJob(context.Background(), nil, func(o *Options) { @@ -86,6 +134,66 @@ func TestCheckSnapshot_GetBillOfMaterialsImportJob(t *testing.T) { } } +func TestCheckSnapshot_GetDataIntegrationFlow(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetDataIntegrationFlow(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "GetDataIntegrationFlow") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_GetDataLakeDataset(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetDataLakeDataset(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "GetDataLakeDataset") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + 
t.Fatal(err) + } +} + +func TestCheckSnapshot_ListDataIntegrationFlows(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListDataIntegrationFlows(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListDataIntegrationFlows") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListDataLakeDatasets(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListDataLakeDatasets(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListDataLakeDatasets") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_ListTagsForResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTagsForResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "ListTagsForResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_SendDataIntegrationEvent(t *testing.T) { svc := New(Options{}) _, err := svc.SendDataIntegrationEvent(context.Background(), nil, func(o *Options) { @@ -97,6 +205,54 @@ func TestCheckSnapshot_SendDataIntegrationEvent(t *testing.T) { t.Fatal(err) } } + +func TestCheckSnapshot_TagResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.TagResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "TagResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UntagResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.UntagResource(context.Background(), nil, func(o *Options) { + o.APIOptions = 
append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UntagResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateDataIntegrationFlow(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateDataIntegrationFlow(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateDataIntegrationFlow") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestCheckSnapshot_UpdateDataLakeDataset(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateDataLakeDataset(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "UpdateDataLakeDataset") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} func TestUpdateSnapshot_CreateBillOfMaterialsImportJob(t *testing.T) { svc := New(Options{}) _, err := svc.CreateBillOfMaterialsImportJob(context.Background(), nil, func(o *Options) { @@ -109,6 +265,54 @@ func TestUpdateSnapshot_CreateBillOfMaterialsImportJob(t *testing.T) { } } +func TestUpdateSnapshot_CreateDataIntegrationFlow(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateDataIntegrationFlow(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CreateDataIntegrationFlow") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_CreateDataLakeDataset(t *testing.T) { + svc := New(Options{}) + _, err := svc.CreateDataLakeDataset(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "CreateDataLakeDataset") + }) + }) + if _, ok := err.(snapshotOK); 
!ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeleteDataIntegrationFlow(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteDataIntegrationFlow(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteDataIntegrationFlow") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_DeleteDataLakeDataset(t *testing.T) { + svc := New(Options{}) + _, err := svc.DeleteDataLakeDataset(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "DeleteDataLakeDataset") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_GetBillOfMaterialsImportJob(t *testing.T) { svc := New(Options{}) _, err := svc.GetBillOfMaterialsImportJob(context.Background(), nil, func(o *Options) { @@ -121,6 +325,66 @@ func TestUpdateSnapshot_GetBillOfMaterialsImportJob(t *testing.T) { } } +func TestUpdateSnapshot_GetDataIntegrationFlow(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetDataIntegrationFlow(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "GetDataIntegrationFlow") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_GetDataLakeDataset(t *testing.T) { + svc := New(Options{}) + _, err := svc.GetDataLakeDataset(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "GetDataLakeDataset") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListDataIntegrationFlows(t *testing.T) { + svc := New(Options{}) + _, err := 
svc.ListDataIntegrationFlows(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListDataIntegrationFlows") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListDataLakeDatasets(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListDataLakeDatasets(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListDataLakeDatasets") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_ListTagsForResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.ListTagsForResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "ListTagsForResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_SendDataIntegrationEvent(t *testing.T) { svc := New(Options{}) _, err := svc.SendDataIntegrationEvent(context.Background(), nil, func(o *Options) { @@ -132,3 +396,51 @@ func TestUpdateSnapshot_SendDataIntegrationEvent(t *testing.T) { t.Fatal(err) } } + +func TestUpdateSnapshot_TagResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.TagResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "TagResource") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UntagResource(t *testing.T) { + svc := New(Options{}) + _, err := svc.UntagResource(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UntagResource") + }) + }) + if _, 
ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateDataIntegrationFlow(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateDataIntegrationFlow(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateDataIntegrationFlow") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + +func TestUpdateSnapshot_UpdateDataLakeDataset(t *testing.T) { + svc := New(Options{}) + _, err := svc.UpdateDataLakeDataset(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "UpdateDataLakeDataset") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} diff --git a/service/supplychain/types/enums.go b/service/supplychain/types/enums.go index f99c5adb17f..3d3e2663dcb 100644 --- a/service/supplychain/types/enums.go +++ b/service/supplychain/types/enums.go @@ -71,3 +71,127 @@ func (DataIntegrationEventType) Values() []DataIntegrationEventType { "scn.data.supplyplan", } } + +type DataIntegrationFlowFileType string + +// Enum values for DataIntegrationFlowFileType +const ( + DataIntegrationFlowFileTypeCsv DataIntegrationFlowFileType = "CSV" + DataIntegrationFlowFileTypeParquet DataIntegrationFlowFileType = "PARQUET" + DataIntegrationFlowFileTypeJson DataIntegrationFlowFileType = "JSON" +) + +// Values returns all known values for DataIntegrationFlowFileType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (DataIntegrationFlowFileType) Values() []DataIntegrationFlowFileType { + return []DataIntegrationFlowFileType{ + "CSV", + "PARQUET", + "JSON", + } +} + +type DataIntegrationFlowLoadType string + +// Enum values for DataIntegrationFlowLoadType +const ( + DataIntegrationFlowLoadTypeIncremental DataIntegrationFlowLoadType = "INCREMENTAL" + DataIntegrationFlowLoadTypeReplace DataIntegrationFlowLoadType = "REPLACE" +) + +// Values returns all known values for DataIntegrationFlowLoadType. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DataIntegrationFlowLoadType) Values() []DataIntegrationFlowLoadType { + return []DataIntegrationFlowLoadType{ + "INCREMENTAL", + "REPLACE", + } +} + +type DataIntegrationFlowSourceType string + +// Enum values for DataIntegrationFlowSourceType +const ( + DataIntegrationFlowSourceTypeS3 DataIntegrationFlowSourceType = "S3" + DataIntegrationFlowSourceTypeDataset DataIntegrationFlowSourceType = "DATASET" +) + +// Values returns all known values for DataIntegrationFlowSourceType. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DataIntegrationFlowSourceType) Values() []DataIntegrationFlowSourceType { + return []DataIntegrationFlowSourceType{ + "S3", + "DATASET", + } +} + +type DataIntegrationFlowTargetType string + +// Enum values for DataIntegrationFlowTargetType +const ( + DataIntegrationFlowTargetTypeS3 DataIntegrationFlowTargetType = "S3" + DataIntegrationFlowTargetTypeDataset DataIntegrationFlowTargetType = "DATASET" +) + +// Values returns all known values for DataIntegrationFlowTargetType. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DataIntegrationFlowTargetType) Values() []DataIntegrationFlowTargetType { + return []DataIntegrationFlowTargetType{ + "S3", + "DATASET", + } +} + +type DataIntegrationFlowTransformationType string + +// Enum values for DataIntegrationFlowTransformationType +const ( + DataIntegrationFlowTransformationTypeSql DataIntegrationFlowTransformationType = "SQL" + DataIntegrationFlowTransformationTypeNone DataIntegrationFlowTransformationType = "NONE" +) + +// Values returns all known values for DataIntegrationFlowTransformationType. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DataIntegrationFlowTransformationType) Values() []DataIntegrationFlowTransformationType { + return []DataIntegrationFlowTransformationType{ + "SQL", + "NONE", + } +} + +type DataLakeDatasetSchemaFieldType string + +// Enum values for DataLakeDatasetSchemaFieldType +const ( + DataLakeDatasetSchemaFieldTypeInt DataLakeDatasetSchemaFieldType = "INT" + DataLakeDatasetSchemaFieldTypeDouble DataLakeDatasetSchemaFieldType = "DOUBLE" + DataLakeDatasetSchemaFieldTypeString DataLakeDatasetSchemaFieldType = "STRING" + DataLakeDatasetSchemaFieldTypeTimestamp DataLakeDatasetSchemaFieldType = "TIMESTAMP" +) + +// Values returns all known values for DataLakeDatasetSchemaFieldType. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (DataLakeDatasetSchemaFieldType) Values() []DataLakeDatasetSchemaFieldType { + return []DataLakeDatasetSchemaFieldType{ + "INT", + "DOUBLE", + "STRING", + "TIMESTAMP", + } +} diff --git a/service/supplychain/types/types.go b/service/supplychain/types/types.go index db6517f643b..774b42d74bd 100644 --- a/service/supplychain/types/types.go +++ b/service/supplychain/types/types.go @@ -4,6 +4,7 @@ package types import ( smithydocument "github.com/aws/smithy-go/document" + "time" ) // The BillOfMaterialsImportJob details. @@ -36,4 +37,288 @@ type BillOfMaterialsImportJob struct { noSmithyDocumentSerde } +// The DataIntegrationFlow details. +type DataIntegrationFlow struct { + + // The DataIntegrationFlow creation timestamp. + // + // This member is required. + CreatedTime *time.Time + + // The DataIntegrationFlow instance ID. + // + // This member is required. + InstanceId *string + + // The DataIntegrationFlow last modified timestamp. + // + // This member is required. + LastModifiedTime *time.Time + + // The DataIntegrationFlow name. + // + // This member is required. + Name *string + + // The DataIntegrationFlow source configurations. + // + // This member is required. + Sources []DataIntegrationFlowSource + + // The DataIntegrationFlow target configuration. + // + // This member is required. + Target *DataIntegrationFlowTarget + + // The DataIntegrationFlow transformation configurations. + // + // This member is required. + Transformation *DataIntegrationFlowTransformation + + noSmithyDocumentSerde +} + +// The dataset options used in dataset source and target configurations. +type DataIntegrationFlowDatasetOptions struct { + + // The dataset load option to remove duplicates. + DedupeRecords *bool + + // The dataset data load type in dataset options. + LoadType DataIntegrationFlowLoadType + + noSmithyDocumentSerde +} + +// The dataset DataIntegrationFlow source configuration parameters. 
+type DataIntegrationFlowDatasetSourceConfiguration struct { + + // The ARN of the dataset. + // + // This member is required. + DatasetIdentifier *string + + // The dataset DataIntegrationFlow source options. + Options *DataIntegrationFlowDatasetOptions + + noSmithyDocumentSerde +} + +// The dataset DataIntegrationFlow target configuration parameters. +type DataIntegrationFlowDatasetTargetConfiguration struct { + + // The dataset ARN. + // + // This member is required. + DatasetIdentifier *string + + // The dataset DataIntegrationFlow target options. + Options *DataIntegrationFlowDatasetOptions + + noSmithyDocumentSerde +} + +// The Amazon S3 options used in S3 source and target configurations. +type DataIntegrationFlowS3Options struct { + + // The Amazon S3 file type in S3 options. + FileType DataIntegrationFlowFileType + + noSmithyDocumentSerde +} + +// The S3 DataIntegrationFlow source configuration parameters. +type DataIntegrationFlowS3SourceConfiguration struct { + + // The bucketName of the S3 source objects. + // + // This member is required. + BucketName *string + + // The prefix of the S3 source objects. + // + // This member is required. + Prefix *string + + // The other options of the S3 DataIntegrationFlow source. + Options *DataIntegrationFlowS3Options + + noSmithyDocumentSerde +} + +// The S3 DataIntegrationFlow target configuration parameters. +type DataIntegrationFlowS3TargetConfiguration struct { + + // The bucketName of the S3 target objects. + // + // This member is required. + BucketName *string + + // The prefix of the S3 target objects. + // + // This member is required. + Prefix *string + + // The S3 DataIntegrationFlow target options. + Options *DataIntegrationFlowS3Options + + noSmithyDocumentSerde +} + +// The DataIntegrationFlow source parameters. +type DataIntegrationFlowSource struct { + + // The DataIntegrationFlow source name that can be used as table alias in SQL + // transformation query. + // + // This member is required. 
+ SourceName *string + + // The DataIntegrationFlow source type. + // + // This member is required. + SourceType DataIntegrationFlowSourceType + + // The dataset DataIntegrationFlow source. + DatasetSource *DataIntegrationFlowDatasetSourceConfiguration + + // The S3 DataIntegrationFlow source. + S3Source *DataIntegrationFlowS3SourceConfiguration + + noSmithyDocumentSerde +} + +// The SQL DataIntegrationFlow transformation configuration parameters. +type DataIntegrationFlowSQLTransformationConfiguration struct { + + // The transformation SQL query body based on SparkSQL. + // + // This member is required. + Query *string + + noSmithyDocumentSerde +} + +// The DataIntegrationFlow target parameters. +type DataIntegrationFlowTarget struct { + + // The DataIntegrationFlow target type. + // + // This member is required. + TargetType DataIntegrationFlowTargetType + + // The dataset DataIntegrationFlow target. + DatasetTarget *DataIntegrationFlowDatasetTargetConfiguration + + // The S3 DataIntegrationFlow target. + S3Target *DataIntegrationFlowS3TargetConfiguration + + noSmithyDocumentSerde +} + +// The DataIntegrationFlow transformation parameters. +type DataIntegrationFlowTransformation struct { + + // The DataIntegrationFlow transformation type. + // + // This member is required. + TransformationType DataIntegrationFlowTransformationType + + // The SQL DataIntegrationFlow transformation configuration. + SqlTransformation *DataIntegrationFlowSQLTransformationConfiguration + + noSmithyDocumentSerde +} + +// The data lake dataset details. +type DataLakeDataset struct { + + // The arn of the dataset. + // + // This member is required. + Arn *string + + // The creation time of the dataset. + // + // This member is required. + CreatedTime *time.Time + + // The Amazon Web Services Supply Chain instance identifier. + // + // This member is required. + InstanceId *string + + // The last modified time of the dataset. + // + // This member is required. 
+ LastModifiedTime *time.Time + + // The name of the dataset. For asc name space, the name must be one of the + // supported data entities under [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]. + // + // [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Name *string + + // The name space of the dataset. The available values are: + // + // - asc - For information on the Amazon Web Services Supply Chain supported + // datasets see [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]. + // + // - default - For datasets with custom user-defined schemas. + // + // [https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html]: https://docs.aws.amazon.com/aws-supply-chain/latest/userguide/data-model-asc.html + // + // This member is required. + Namespace *string + + // The schema of the dataset. + // + // This member is required. + Schema *DataLakeDatasetSchema + + // The description of the dataset. + Description *string + + noSmithyDocumentSerde +} + +// The schema details of the dataset. +type DataLakeDatasetSchema struct { + + // The list of field details of the dataset schema. + // + // This member is required. + Fields []DataLakeDatasetSchemaField + + // The name of the dataset schema. + // + // This member is required. + Name *string + + noSmithyDocumentSerde +} + +// The dataset field details. +type DataLakeDatasetSchemaField struct { + + // Indicate if the field is required or not. + // + // This member is required. + IsRequired *bool + + // The dataset field name. + // + // This member is required. + Name *string + + // The dataset field type. + // + // This member is required. 
+ Type DataLakeDatasetSchemaFieldType + + noSmithyDocumentSerde +} + type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/service/supplychain/validators.go b/service/supplychain/validators.go index dd9430a272b..e87cef69908 100644 --- a/service/supplychain/validators.go +++ b/service/supplychain/validators.go @@ -5,6 +5,7 @@ package supplychain import ( "context" "fmt" + "github.com/aws/aws-sdk-go-v2/service/supplychain/types" smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/middleware" ) @@ -29,6 +30,86 @@ func (m *validateOpCreateBillOfMaterialsImportJob) HandleInitialize(ctx context. return next.HandleInitialize(ctx, in) } +type validateOpCreateDataIntegrationFlow struct { +} + +func (*validateOpCreateDataIntegrationFlow) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateDataIntegrationFlow) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateDataIntegrationFlowInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateDataIntegrationFlowInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateDataLakeDataset struct { +} + +func (*validateOpCreateDataLakeDataset) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateDataLakeDataset) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateDataLakeDatasetInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateDataLakeDatasetInput(input); err != nil { + return out, metadata, err + 
} + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteDataIntegrationFlow struct { +} + +func (*validateOpDeleteDataIntegrationFlow) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteDataIntegrationFlow) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteDataIntegrationFlowInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteDataIntegrationFlowInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteDataLakeDataset struct { +} + +func (*validateOpDeleteDataLakeDataset) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteDataLakeDataset) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteDataLakeDatasetInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteDataLakeDatasetInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetBillOfMaterialsImportJob struct { } @@ -49,6 +130,106 @@ func (m *validateOpGetBillOfMaterialsImportJob) HandleInitialize(ctx context.Con return next.HandleInitialize(ctx, in) } +type validateOpGetDataIntegrationFlow struct { +} + +func (*validateOpGetDataIntegrationFlow) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetDataIntegrationFlow) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, 
err error, +) { + input, ok := in.Parameters.(*GetDataIntegrationFlowInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetDataIntegrationFlowInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetDataLakeDataset struct { +} + +func (*validateOpGetDataLakeDataset) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetDataLakeDataset) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetDataLakeDatasetInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetDataLakeDatasetInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListDataIntegrationFlows struct { +} + +func (*validateOpListDataIntegrationFlows) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListDataIntegrationFlows) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListDataIntegrationFlowsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListDataIntegrationFlowsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListDataLakeDatasets struct { +} + +func (*validateOpListDataLakeDatasets) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListDataLakeDatasets) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out 
middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListDataLakeDatasetsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListDataLakeDatasetsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListTagsForResource struct { +} + +func (*validateOpListTagsForResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListTagsForResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListTagsForResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpSendDataIntegrationEvent struct { } @@ -69,28 +250,172 @@ func (m *validateOpSendDataIntegrationEvent) HandleInitialize(ctx context.Contex return next.HandleInitialize(ctx, in) } +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return 
"OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateDataIntegrationFlow struct { +} + +func (*validateOpUpdateDataIntegrationFlow) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateDataIntegrationFlow) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateDataIntegrationFlowInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateDataIntegrationFlowInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateDataLakeDataset struct { +} + +func (*validateOpUpdateDataLakeDataset) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateDataLakeDataset) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateDataLakeDatasetInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateDataLakeDatasetInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + func 
addOpCreateBillOfMaterialsImportJobValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateBillOfMaterialsImportJob{}, middleware.After) } +func addOpCreateDataIntegrationFlowValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateDataIntegrationFlow{}, middleware.After) +} + +func addOpCreateDataLakeDatasetValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateDataLakeDataset{}, middleware.After) +} + +func addOpDeleteDataIntegrationFlowValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteDataIntegrationFlow{}, middleware.After) +} + +func addOpDeleteDataLakeDatasetValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteDataLakeDataset{}, middleware.After) +} + func addOpGetBillOfMaterialsImportJobValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetBillOfMaterialsImportJob{}, middleware.After) } +func addOpGetDataIntegrationFlowValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetDataIntegrationFlow{}, middleware.After) +} + +func addOpGetDataLakeDatasetValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetDataLakeDataset{}, middleware.After) +} + +func addOpListDataIntegrationFlowsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListDataIntegrationFlows{}, middleware.After) +} + +func addOpListDataLakeDatasetsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListDataLakeDatasets{}, middleware.After) +} + +func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After) +} + func 
addOpSendDataIntegrationEventValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpSendDataIntegrationEvent{}, middleware.After) } -func validateOpCreateBillOfMaterialsImportJobInput(v *CreateBillOfMaterialsImportJobInput) error { +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUpdateDataIntegrationFlowValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateDataIntegrationFlow{}, middleware.After) +} + +func addOpUpdateDataLakeDatasetValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateDataLakeDataset{}, middleware.After) +} + +func validateDataIntegrationFlowDatasetSourceConfiguration(v *types.DataIntegrationFlowDatasetSourceConfiguration) error { if v == nil { return nil } - invalidParams := smithy.InvalidParamsError{Context: "CreateBillOfMaterialsImportJobInput"} - if v.InstanceId == nil { - invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowDatasetSourceConfiguration"} + if v.DatasetIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DatasetIdentifier")) } - if v.S3uri == nil { - invalidParams.Add(smithy.NewErrParamRequired("S3uri")) + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDataIntegrationFlowDatasetTargetConfiguration(v *types.DataIntegrationFlowDatasetTargetConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowDatasetTargetConfiguration"} + if v.DatasetIdentifier == nil { + 
invalidParams.Add(smithy.NewErrParamRequired("DatasetIdentifier")) } if invalidParams.Len() > 0 { return invalidParams @@ -99,16 +424,16 @@ func validateOpCreateBillOfMaterialsImportJobInput(v *CreateBillOfMaterialsImpor } } -func validateOpGetBillOfMaterialsImportJobInput(v *GetBillOfMaterialsImportJobInput) error { +func validateDataIntegrationFlowS3SourceConfiguration(v *types.DataIntegrationFlowS3SourceConfiguration) error { if v == nil { return nil } - invalidParams := smithy.InvalidParamsError{Context: "GetBillOfMaterialsImportJobInput"} - if v.InstanceId == nil { - invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowS3SourceConfiguration"} + if v.BucketName == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketName")) } - if v.JobId == nil { - invalidParams.Add(smithy.NewErrParamRequired("JobId")) + if v.Prefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Prefix")) } if invalidParams.Len() > 0 { return invalidParams @@ -117,21 +442,431 @@ func validateOpGetBillOfMaterialsImportJobInput(v *GetBillOfMaterialsImportJobIn } } -func validateOpSendDataIntegrationEventInput(v *SendDataIntegrationEventInput) error { +func validateDataIntegrationFlowS3TargetConfiguration(v *types.DataIntegrationFlowS3TargetConfiguration) error { if v == nil { return nil } - invalidParams := smithy.InvalidParamsError{Context: "SendDataIntegrationEventInput"} - if v.InstanceId == nil { - invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowS3TargetConfiguration"} + if v.BucketName == nil { + invalidParams.Add(smithy.NewErrParamRequired("BucketName")) } - if len(v.EventType) == 0 { - invalidParams.Add(smithy.NewErrParamRequired("EventType")) + if v.Prefix == nil { + invalidParams.Add(smithy.NewErrParamRequired("Prefix")) } - if v.Data == nil { - invalidParams.Add(smithy.NewErrParamRequired("Data")) + if 
invalidParams.Len() > 0 { + return invalidParams + } else { + return nil } - if v.EventGroupId == nil { +} + +func validateDataIntegrationFlowSource(v *types.DataIntegrationFlowSource) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowSource"} + if len(v.SourceType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SourceType")) + } + if v.SourceName == nil { + invalidParams.Add(smithy.NewErrParamRequired("SourceName")) + } + if v.S3Source != nil { + if err := validateDataIntegrationFlowS3SourceConfiguration(v.S3Source); err != nil { + invalidParams.AddNested("S3Source", err.(smithy.InvalidParamsError)) + } + } + if v.DatasetSource != nil { + if err := validateDataIntegrationFlowDatasetSourceConfiguration(v.DatasetSource); err != nil { + invalidParams.AddNested("DatasetSource", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDataIntegrationFlowSourceList(v []types.DataIntegrationFlowSource) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowSourceList"} + for i := range v { + if err := validateDataIntegrationFlowSource(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDataIntegrationFlowSQLTransformationConfiguration(v *types.DataIntegrationFlowSQLTransformationConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowSQLTransformationConfiguration"} + if v.Query == nil { + invalidParams.Add(smithy.NewErrParamRequired("Query")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDataIntegrationFlowTarget(v *types.DataIntegrationFlowTarget) error { + if v == 
nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowTarget"} + if len(v.TargetType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("TargetType")) + } + if v.S3Target != nil { + if err := validateDataIntegrationFlowS3TargetConfiguration(v.S3Target); err != nil { + invalidParams.AddNested("S3Target", err.(smithy.InvalidParamsError)) + } + } + if v.DatasetTarget != nil { + if err := validateDataIntegrationFlowDatasetTargetConfiguration(v.DatasetTarget); err != nil { + invalidParams.AddNested("DatasetTarget", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDataIntegrationFlowTransformation(v *types.DataIntegrationFlowTransformation) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataIntegrationFlowTransformation"} + if len(v.TransformationType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("TransformationType")) + } + if v.SqlTransformation != nil { + if err := validateDataIntegrationFlowSQLTransformationConfiguration(v.SqlTransformation); err != nil { + invalidParams.AddNested("SqlTransformation", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDataLakeDatasetSchema(v *types.DataLakeDatasetSchema) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataLakeDatasetSchema"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Fields == nil { + invalidParams.Add(smithy.NewErrParamRequired("Fields")) + } else if v.Fields != nil { + if err := validateDataLakeDatasetSchemaFieldList(v.Fields); err != nil { + invalidParams.AddNested("Fields", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func 
validateDataLakeDatasetSchemaField(v *types.DataLakeDatasetSchemaField) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataLakeDatasetSchemaField"} + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if len(v.Type) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("Type")) + } + if v.IsRequired == nil { + invalidParams.Add(smithy.NewErrParamRequired("IsRequired")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateDataLakeDatasetSchemaFieldList(v []types.DataLakeDatasetSchemaField) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DataLakeDatasetSchemaFieldList"} + for i := range v { + if err := validateDataLakeDatasetSchemaField(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateBillOfMaterialsImportJobInput(v *CreateBillOfMaterialsImportJobInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateBillOfMaterialsImportJobInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.S3uri == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3uri")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateDataIntegrationFlowInput(v *CreateDataIntegrationFlowInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateDataIntegrationFlowInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Sources == nil { + invalidParams.Add(smithy.NewErrParamRequired("Sources")) + } else if v.Sources 
!= nil { + if err := validateDataIntegrationFlowSourceList(v.Sources); err != nil { + invalidParams.AddNested("Sources", err.(smithy.InvalidParamsError)) + } + } + if v.Transformation == nil { + invalidParams.Add(smithy.NewErrParamRequired("Transformation")) + } else if v.Transformation != nil { + if err := validateDataIntegrationFlowTransformation(v.Transformation); err != nil { + invalidParams.AddNested("Transformation", err.(smithy.InvalidParamsError)) + } + } + if v.Target == nil { + invalidParams.Add(smithy.NewErrParamRequired("Target")) + } else if v.Target != nil { + if err := validateDataIntegrationFlowTarget(v.Target); err != nil { + invalidParams.AddNested("Target", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateDataLakeDatasetInput(v *CreateDataLakeDatasetInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateDataLakeDatasetInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Namespace == nil { + invalidParams.Add(smithy.NewErrParamRequired("Namespace")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Schema != nil { + if err := validateDataLakeDatasetSchema(v.Schema); err != nil { + invalidParams.AddNested("Schema", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteDataIntegrationFlowInput(v *DeleteDataIntegrationFlowInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteDataIntegrationFlowInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} 
+ +func validateOpDeleteDataLakeDatasetInput(v *DeleteDataLakeDatasetInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteDataLakeDatasetInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Namespace == nil { + invalidParams.Add(smithy.NewErrParamRequired("Namespace")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetBillOfMaterialsImportJobInput(v *GetBillOfMaterialsImportJobInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBillOfMaterialsImportJobInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.JobId == nil { + invalidParams.Add(smithy.NewErrParamRequired("JobId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetDataIntegrationFlowInput(v *GetDataIntegrationFlowInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetDataIntegrationFlowInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetDataLakeDatasetInput(v *GetDataLakeDatasetInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetDataLakeDatasetInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Namespace == nil { + invalidParams.Add(smithy.NewErrParamRequired("Namespace")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return 
invalidParams + } else { + return nil + } +} + +func validateOpListDataIntegrationFlowsInput(v *ListDataIntegrationFlowsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListDataIntegrationFlowsInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListDataLakeDatasetsInput(v *ListDataLakeDatasetsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListDataLakeDatasetsInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Namespace == nil { + invalidParams.Add(smithy.NewErrParamRequired("Namespace")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSendDataIntegrationEventInput(v *SendDataIntegrationEventInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SendDataIntegrationEventInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if len(v.EventType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("EventType")) + } + if v.Data == nil { + invalidParams.Add(smithy.NewErrParamRequired("Data")) + } + if v.EventGroupId == nil { invalidParams.Add(smithy.NewErrParamRequired("EventGroupId")) } if invalidParams.Len() > 0 { @@ -140,3 +875,93 @@ func validateOpSendDataIntegrationEventInput(v *SendDataIntegrationEventInput) e return nil } } + +func 
validateOpTagResourceInput(v *TagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateDataIntegrationFlowInput(v *UpdateDataIntegrationFlowInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateDataIntegrationFlowInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if v.Sources != nil { + if err := validateDataIntegrationFlowSourceList(v.Sources); err != nil { + invalidParams.AddNested("Sources", err.(smithy.InvalidParamsError)) + } + } + if v.Transformation != nil { + if err := validateDataIntegrationFlowTransformation(v.Transformation); err != nil { + invalidParams.AddNested("Transformation", err.(smithy.InvalidParamsError)) + } + } + if v.Target != nil { + if err := validateDataIntegrationFlowTarget(v.Target); err != nil { + invalidParams.AddNested("Target", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateDataLakeDatasetInput(v *UpdateDataLakeDatasetInput) error { + if v == nil { + return 
nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateDataLakeDatasetInput"} + if v.InstanceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("InstanceId")) + } + if v.Namespace == nil { + invalidParams.Add(smithy.NewErrParamRequired("Namespace")) + } + if v.Name == nil { + invalidParams.Add(smithy.NewErrParamRequired("Name")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/service/timestreaminfluxdb/api_op_CreateDbInstance.go b/service/timestreaminfluxdb/api_op_CreateDbInstance.go index 41b2adfd35b..536891fa578 100644 --- a/service/timestreaminfluxdb/api_op_CreateDbInstance.go +++ b/service/timestreaminfluxdb/api_op_CreateDbInstance.go @@ -101,6 +101,15 @@ type CreateDbInstanceInput struct { // InfluxDB organization is a workspace for a group of users. Organization *string + // The port number on which InfluxDB accepts connections. + // + // Valid Values: 1024-65535 + // + // Default: 8086 + // + // Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 + Port *int32 + // Configures the DB instance with a public IP to facilitate access. PubliclyAccessible *bool @@ -172,6 +181,10 @@ type CreateDbInstanceOutput struct { // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. LogDeliveryConfiguration *types.LogDeliveryConfiguration + // The port number on which InfluxDB accepts connections. The default value is + // 8086. + Port *int32 + // Indicates if the DB instance has a public IP to facilitate access. PubliclyAccessible *bool diff --git a/service/timestreaminfluxdb/api_op_DeleteDbInstance.go b/service/timestreaminfluxdb/api_op_DeleteDbInstance.go index b1e8c7305d9..548298072c1 100644 --- a/service/timestreaminfluxdb/api_op_DeleteDbInstance.go +++ b/service/timestreaminfluxdb/api_op_DeleteDbInstance.go @@ -91,6 +91,9 @@ type DeleteDbInstanceOutput struct { // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. 
LogDeliveryConfiguration *types.LogDeliveryConfiguration + // The port number on which InfluxDB accepts connections. + Port *int32 + // Indicates if the DB instance has a public IP to facilitate access. PubliclyAccessible *bool diff --git a/service/timestreaminfluxdb/api_op_GetDbInstance.go b/service/timestreaminfluxdb/api_op_GetDbInstance.go index 0f030fd53be..98655163cc8 100644 --- a/service/timestreaminfluxdb/api_op_GetDbInstance.go +++ b/service/timestreaminfluxdb/api_op_GetDbInstance.go @@ -91,6 +91,9 @@ type GetDbInstanceOutput struct { // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. LogDeliveryConfiguration *types.LogDeliveryConfiguration + // The port number on which InfluxDB accepts connections. + Port *int32 + // Indicates if the DB instance has a public IP to facilitate access. PubliclyAccessible *bool diff --git a/service/timestreaminfluxdb/api_op_UpdateDbInstance.go b/service/timestreaminfluxdb/api_op_UpdateDbInstance.go index 4dda1bd437f..76c733d23b5 100644 --- a/service/timestreaminfluxdb/api_op_UpdateDbInstance.go +++ b/service/timestreaminfluxdb/api_op_UpdateDbInstance.go @@ -49,6 +49,17 @@ type UpdateDbInstanceInput struct { // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. LogDeliveryConfiguration *types.LogDeliveryConfiguration + // The port number on which InfluxDB accepts connections. + // + // If you change the Port value, your database restarts immediately. + // + // Valid Values: 1024-65535 + // + // Default: 8086 + // + // Constraints: The value can't be 2375-2376, 7788-7799, 8090, or 51678-51680 + Port *int32 + noSmithyDocumentSerde } @@ -106,6 +117,9 @@ type UpdateDbInstanceOutput struct { // Configuration for sending InfluxDB engine logs to send to specified S3 bucket. LogDeliveryConfiguration *types.LogDeliveryConfiguration + // The port number on which InfluxDB accepts connections. + Port *int32 + // Indicates if the DB instance has a public IP to facilitate access. 
PubliclyAccessible *bool diff --git a/service/timestreaminfluxdb/deserializers.go b/service/timestreaminfluxdb/deserializers.go index cf1afd2a995..e5856e5d939 100644 --- a/service/timestreaminfluxdb/deserializers.go +++ b/service/timestreaminfluxdb/deserializers.go @@ -1762,6 +1762,19 @@ func awsAwsjson10_deserializeDocumentDbInstanceSummary(v **types.DbInstanceSumma sv.Name = ptr.String(jtv) } + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Port to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + case "status": if value != nil { jtv, ok := value.(string) @@ -1915,6 +1928,59 @@ func awsAwsjson10_deserializeDocumentDbParameterGroupSummaryList(v *[]types.DbPa return nil } +func awsAwsjson10_deserializeDocumentDuration(v **types.Duration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.Duration + if *v == nil { + sv = &types.Duration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "durationType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DurationType to be of type string, got %T instead", value) + } + sv.DurationType = types.DurationType(jtv) + } + + case "value": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Value = ptr.Int64(i64) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Parameters, value interface{}) error { if v == nil { return 
fmt.Errorf("unexpected nil of type %T", v) @@ -1946,6 +2012,65 @@ func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Pa sv.FluxLogEnabled = ptr.Bool(jtv) } + case "httpIdleTimeout": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpIdleTimeout, value); err != nil { + return err + } + + case "httpReadHeaderTimeout": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpReadHeaderTimeout, value); err != nil { + return err + } + + case "httpReadTimeout": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpReadTimeout, value); err != nil { + return err + } + + case "httpWriteTimeout": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.HttpWriteTimeout, value); err != nil { + return err + } + + case "influxqlMaxSelectBuckets": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InfluxqlMaxSelectBuckets = ptr.Int64(i64) + } + + case "influxqlMaxSelectPoint": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InfluxqlMaxSelectPoint = ptr.Int64(i64) + } + + case "influxqlMaxSelectSeries": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.InfluxqlMaxSelectSeries = ptr.Int64(i64) + } + case "logLevel": if value != nil { jtv, ok := value.(string) @@ -1973,6 +2098,15 @@ func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Pa sv.NoTasks = ptr.Bool(jtv) } + case "pprofDisabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + 
sv.PprofDisabled = ptr.Bool(jtv) + } + case "queryConcurrency": if value != nil { jtv, ok := value.(json.Number) @@ -1986,6 +2120,45 @@ func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Pa sv.QueryConcurrency = ptr.Int32(int32(i64)) } + case "queryInitialMemoryBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryInitialMemoryBytes = ptr.Int64(i64) + } + + case "queryMaxMemoryBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryMaxMemoryBytes = ptr.Int64(i64) + } + + case "queryMemoryBytes": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.QueryMemoryBytes = ptr.Int64(i64) + } + case "queryQueueSize": if value != nil { jtv, ok := value.(json.Number) @@ -1999,6 +2172,161 @@ func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Pa sv.QueryQueueSize = ptr.Int32(int32(i64)) } + case "sessionLength": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.SessionLength = ptr.Int32(int32(i64)) + } + + case "sessionRenewDisabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.SessionRenewDisabled = ptr.Bool(jtv) + } + + case "storageCacheMaxMemorySize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be 
json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageCacheMaxMemorySize = ptr.Int64(i64) + } + + case "storageCacheSnapshotMemorySize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageCacheSnapshotMemorySize = ptr.Int64(i64) + } + + case "storageCacheSnapshotWriteColdDuration": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageCacheSnapshotWriteColdDuration, value); err != nil { + return err + } + + case "storageCompactFullWriteColdDuration": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageCompactFullWriteColdDuration, value); err != nil { + return err + } + + case "storageCompactThroughputBurst": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageCompactThroughputBurst = ptr.Int64(i64) + } + + case "storageMaxConcurrentCompactions": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageMaxConcurrentCompactions = ptr.Int32(int32(i64)) + } + + case "storageMaxIndexLogFileSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageMaxIndexLogFileSize = ptr.Int64(i64) + } + + case "storageNoValidateFieldSize": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.StorageNoValidateFieldSize = ptr.Bool(jtv) + } + + case 
"storageRetentionCheckInterval": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageRetentionCheckInterval, value); err != nil { + return err + } + + case "storageSeriesFileMaxConcurrentSnapshotCompactions": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageSeriesFileMaxConcurrentSnapshotCompactions = ptr.Int32(int32(i64)) + } + + case "storageSeriesIdSetCacheSize": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Long to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageSeriesIdSetCacheSize = ptr.Int64(i64) + } + + case "storageWalMaxConcurrentWrites": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Integer to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.StorageWalMaxConcurrentWrites = ptr.Int32(int32(i64)) + } + + case "storageWalMaxWriteDelay": + if err := awsAwsjson10_deserializeDocumentDuration(&sv.StorageWalMaxWriteDelay, value); err != nil { + return err + } + case "tracingType": if value != nil { jtv, ok := value.(string) @@ -2008,6 +2336,15 @@ func awsAwsjson10_deserializeDocumentInfluxDBv2Parameters(v **types.InfluxDBv2Pa sv.TracingType = types.TracingType(jtv) } + case "uiDisabled": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", value) + } + sv.UiDisabled = ptr.Bool(jtv) + } + default: _, _ = key, value @@ -2620,6 +2957,19 @@ func awsAwsjson10_deserializeOpDocumentCreateDbInstanceOutput(v **CreateDbInstan sv.Name = ptr.String(jtv) } + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Port to be json.Number, got %T 
instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + case "publiclyAccessible": if value != nil { jtv, ok := value.(bool) @@ -2868,6 +3218,19 @@ func awsAwsjson10_deserializeOpDocumentDeleteDbInstanceOutput(v **DeleteDbInstan sv.Name = ptr.String(jtv) } + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Port to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + case "publiclyAccessible": if value != nil { jtv, ok := value.(bool) @@ -3044,6 +3407,19 @@ func awsAwsjson10_deserializeOpDocumentGetDbInstanceOutput(v **GetDbInstanceOutp sv.Name = ptr.String(jtv) } + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Port to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + case "publiclyAccessible": if value != nil { jtv, ok := value.(bool) @@ -3418,6 +3794,19 @@ func awsAwsjson10_deserializeOpDocumentUpdateDbInstanceOutput(v **UpdateDbInstan sv.Name = ptr.String(jtv) } + case "port": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected Port to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Port = ptr.Int32(int32(i64)) + } + case "publiclyAccessible": if value != nil { jtv, ok := value.(bool) diff --git a/service/timestreaminfluxdb/serializers.go b/service/timestreaminfluxdb/serializers.go index c20f1d80461..31328e74005 100644 --- a/service/timestreaminfluxdb/serializers.go +++ b/service/timestreaminfluxdb/serializers.go @@ -686,6 +686,23 @@ func (m *awsAwsjson10_serializeOpUpdateDbInstance) HandleSerialize(ctx context.C span.End() return next.HandleSerialize(ctx, in) } +func 
awsAwsjson10_serializeDocumentDuration(v *types.Duration, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.DurationType) > 0 { + ok := object.Key("durationType") + ok.String(string(v.DurationType)) + } + + if v.Value != nil { + ok := object.Key("value") + ok.Long(*v.Value) + } + + return nil +} + func awsAwsjson10_serializeDocumentInfluxDBv2Parameters(v *types.InfluxDBv2Parameters, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -695,6 +712,49 @@ func awsAwsjson10_serializeDocumentInfluxDBv2Parameters(v *types.InfluxDBv2Param ok.Boolean(*v.FluxLogEnabled) } + if v.HttpIdleTimeout != nil { + ok := object.Key("httpIdleTimeout") + if err := awsAwsjson10_serializeDocumentDuration(v.HttpIdleTimeout, ok); err != nil { + return err + } + } + + if v.HttpReadHeaderTimeout != nil { + ok := object.Key("httpReadHeaderTimeout") + if err := awsAwsjson10_serializeDocumentDuration(v.HttpReadHeaderTimeout, ok); err != nil { + return err + } + } + + if v.HttpReadTimeout != nil { + ok := object.Key("httpReadTimeout") + if err := awsAwsjson10_serializeDocumentDuration(v.HttpReadTimeout, ok); err != nil { + return err + } + } + + if v.HttpWriteTimeout != nil { + ok := object.Key("httpWriteTimeout") + if err := awsAwsjson10_serializeDocumentDuration(v.HttpWriteTimeout, ok); err != nil { + return err + } + } + + if v.InfluxqlMaxSelectBuckets != nil { + ok := object.Key("influxqlMaxSelectBuckets") + ok.Long(*v.InfluxqlMaxSelectBuckets) + } + + if v.InfluxqlMaxSelectPoint != nil { + ok := object.Key("influxqlMaxSelectPoint") + ok.Long(*v.InfluxqlMaxSelectPoint) + } + + if v.InfluxqlMaxSelectSeries != nil { + ok := object.Key("influxqlMaxSelectSeries") + ok.Long(*v.InfluxqlMaxSelectSeries) + } + if len(v.LogLevel) > 0 { ok := object.Key("logLevel") ok.String(string(v.LogLevel)) @@ -710,21 +770,129 @@ func awsAwsjson10_serializeDocumentInfluxDBv2Parameters(v *types.InfluxDBv2Param ok.Boolean(*v.NoTasks) } + if 
v.PprofDisabled != nil { + ok := object.Key("pprofDisabled") + ok.Boolean(*v.PprofDisabled) + } + if v.QueryConcurrency != nil { ok := object.Key("queryConcurrency") ok.Integer(*v.QueryConcurrency) } + if v.QueryInitialMemoryBytes != nil { + ok := object.Key("queryInitialMemoryBytes") + ok.Long(*v.QueryInitialMemoryBytes) + } + + if v.QueryMaxMemoryBytes != nil { + ok := object.Key("queryMaxMemoryBytes") + ok.Long(*v.QueryMaxMemoryBytes) + } + + if v.QueryMemoryBytes != nil { + ok := object.Key("queryMemoryBytes") + ok.Long(*v.QueryMemoryBytes) + } + if v.QueryQueueSize != nil { ok := object.Key("queryQueueSize") ok.Integer(*v.QueryQueueSize) } + if v.SessionLength != nil { + ok := object.Key("sessionLength") + ok.Integer(*v.SessionLength) + } + + if v.SessionRenewDisabled != nil { + ok := object.Key("sessionRenewDisabled") + ok.Boolean(*v.SessionRenewDisabled) + } + + if v.StorageCacheMaxMemorySize != nil { + ok := object.Key("storageCacheMaxMemorySize") + ok.Long(*v.StorageCacheMaxMemorySize) + } + + if v.StorageCacheSnapshotMemorySize != nil { + ok := object.Key("storageCacheSnapshotMemorySize") + ok.Long(*v.StorageCacheSnapshotMemorySize) + } + + if v.StorageCacheSnapshotWriteColdDuration != nil { + ok := object.Key("storageCacheSnapshotWriteColdDuration") + if err := awsAwsjson10_serializeDocumentDuration(v.StorageCacheSnapshotWriteColdDuration, ok); err != nil { + return err + } + } + + if v.StorageCompactFullWriteColdDuration != nil { + ok := object.Key("storageCompactFullWriteColdDuration") + if err := awsAwsjson10_serializeDocumentDuration(v.StorageCompactFullWriteColdDuration, ok); err != nil { + return err + } + } + + if v.StorageCompactThroughputBurst != nil { + ok := object.Key("storageCompactThroughputBurst") + ok.Long(*v.StorageCompactThroughputBurst) + } + + if v.StorageMaxConcurrentCompactions != nil { + ok := object.Key("storageMaxConcurrentCompactions") + ok.Integer(*v.StorageMaxConcurrentCompactions) + } + + if v.StorageMaxIndexLogFileSize != 
nil { + ok := object.Key("storageMaxIndexLogFileSize") + ok.Long(*v.StorageMaxIndexLogFileSize) + } + + if v.StorageNoValidateFieldSize != nil { + ok := object.Key("storageNoValidateFieldSize") + ok.Boolean(*v.StorageNoValidateFieldSize) + } + + if v.StorageRetentionCheckInterval != nil { + ok := object.Key("storageRetentionCheckInterval") + if err := awsAwsjson10_serializeDocumentDuration(v.StorageRetentionCheckInterval, ok); err != nil { + return err + } + } + + if v.StorageSeriesFileMaxConcurrentSnapshotCompactions != nil { + ok := object.Key("storageSeriesFileMaxConcurrentSnapshotCompactions") + ok.Integer(*v.StorageSeriesFileMaxConcurrentSnapshotCompactions) + } + + if v.StorageSeriesIdSetCacheSize != nil { + ok := object.Key("storageSeriesIdSetCacheSize") + ok.Long(*v.StorageSeriesIdSetCacheSize) + } + + if v.StorageWalMaxConcurrentWrites != nil { + ok := object.Key("storageWalMaxConcurrentWrites") + ok.Integer(*v.StorageWalMaxConcurrentWrites) + } + + if v.StorageWalMaxWriteDelay != nil { + ok := object.Key("storageWalMaxWriteDelay") + if err := awsAwsjson10_serializeDocumentDuration(v.StorageWalMaxWriteDelay, ok); err != nil { + return err + } + } + if len(v.TracingType) > 0 { ok := object.Key("tracingType") ok.String(string(v.TracingType)) } + if v.UiDisabled != nil { + ok := object.Key("uiDisabled") + ok.Boolean(*v.UiDisabled) + } + return nil } @@ -877,6 +1045,11 @@ func awsAwsjson10_serializeOpDocumentCreateDbInstanceInput(v *CreateDbInstanceIn ok.String(*v.Password) } + if v.Port != nil { + ok := object.Key("port") + ok.Integer(*v.Port) + } + if v.PubliclyAccessible != nil { ok := object.Key("publiclyAccessible") ok.Boolean(*v.PubliclyAccessible) @@ -1093,5 +1266,10 @@ func awsAwsjson10_serializeOpDocumentUpdateDbInstanceInput(v *UpdateDbInstanceIn } } + if v.Port != nil { + ok := object.Key("port") + ok.Integer(*v.Port) + } + return nil } diff --git a/service/timestreaminfluxdb/types/enums.go b/service/timestreaminfluxdb/types/enums.go index 
7c63df3db2c..2644db3c806 100644 --- a/service/timestreaminfluxdb/types/enums.go +++ b/service/timestreaminfluxdb/types/enums.go @@ -73,6 +73,29 @@ func (DeploymentType) Values() []DeploymentType { } } +type DurationType string + +// Enum values for DurationType +const ( + DurationTypeHours DurationType = "hours" + DurationTypeMinutes DurationType = "minutes" + DurationTypeSeconds DurationType = "seconds" + DurationTypeMilliseconds DurationType = "milliseconds" +) + +// Values returns all known values for DurationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (DurationType) Values() []DurationType { + return []DurationType{ + "hours", + "minutes", + "seconds", + "milliseconds", + } +} + type LogLevel string // Enum values for LogLevel diff --git a/service/timestreaminfluxdb/types/types.go b/service/timestreaminfluxdb/types/types.go index e95fc9539e8..df81f72d3f8 100644 --- a/service/timestreaminfluxdb/types/types.go +++ b/service/timestreaminfluxdb/types/types.go @@ -40,6 +40,9 @@ type DbInstanceSummary struct { // The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086. Endpoint *string + // The port number on which InfluxDB accepts connections. + Port *int32 + // The status of the DB instance. Status Status @@ -70,6 +73,22 @@ type DbParameterGroupSummary struct { noSmithyDocumentSerde } +// Duration for InfluxDB parameters in Timestream for InfluxDB. +type Duration struct { + + // The type of duration for InfluxDB parameters. + // + // This member is required. + DurationType DurationType + + // The value of duration for InfluxDB parameters. + // + // This member is required. + Value *int64 + + noSmithyDocumentSerde +} + // All the customer-modifiable InfluxDB v2 parameters in Timestream for InfluxDB. 
type InfluxDBv2Parameters struct { @@ -78,6 +97,49 @@ type InfluxDBv2Parameters struct { // Default: false FluxLogEnabled *bool + // Maximum duration the server should keep established connections alive while + // waiting for new requests. Set to 0 for no timeout. + // + // Default: 3 minutes + HttpIdleTimeout *Duration + + // Maximum duration the server should try to read HTTP headers for new requests. + // Set to 0 for no timeout. + // + // Default: 10 seconds + HttpReadHeaderTimeout *Duration + + // Maximum duration the server should try to read the entirety of new requests. + // Set to 0 for no timeout. + // + // Default: 0 + HttpReadTimeout *Duration + + // Maximum duration the server should spend processing and responding to write + // requests. Set to 0 for no timeout. + // + // Default: 0 + HttpWriteTimeout *Duration + + // Maximum number of group by time buckets a SELECT statement can create. 0 allows + // an unlimited number of buckets. + // + // Default: 0 + InfluxqlMaxSelectBuckets *int64 + + // Maximum number of points a SELECT statement can process. 0 allows an unlimited + // number of points. InfluxDB checks the point count every second (so queries + // exceeding the maximum aren’t immediately aborted). + // + // Default: 0 + InfluxqlMaxSelectPoint *int64 + + // Maximum number of series a SELECT statement can return. 0 allows an unlimited + // number of series. + // + // Default: 0 + InfluxqlMaxSelectSeries *int64 + // Log output level. InfluxDB outputs log entries with severity levels greater // than or equal to the level specified. // @@ -98,12 +160,36 @@ type InfluxDBv2Parameters struct { // Default: false NoTasks *bool + // Disable the /debug/pprof HTTP endpoint. This endpoint provides runtime + // profiling data and can be helpful when debugging. + // + // Default: false + PprofDisabled *bool + // Number of queries allowed to execute concurrently. Setting to 0 allows an // unlimited number of concurrent queries. 
// // Default: 0 QueryConcurrency *int32 + // Initial bytes of memory allocated for a query. + // + // Default: 0 + QueryInitialMemoryBytes *int64 + + // Maximum number of queries allowed in execution queue. When queue limit is + // reached, new queries are rejected. Setting to 0 allows an unlimited number of + // queries in the queue. + // + // Default: 0 + QueryMaxMemoryBytes *int64 + + // Maximum bytes of memory allowed for a single query. Must be greater or equal to + // queryInitialMemoryBytes. + // + // Default: 0 + QueryMemoryBytes *int64 + // Maximum number of queries allowed in execution queue. When queue limit is // reached, new queries are rejected. Setting to 0 allows an unlimited number of // queries in the queue. @@ -111,10 +197,116 @@ type InfluxDBv2Parameters struct { // Default: 0 QueryQueueSize *int32 + // Specifies the Time to Live (TTL) in minutes for newly created user sessions. + // + // Default: 60 + SessionLength *int32 + + // Disables automatically extending a user’s session TTL on each request. By + // default, every request sets the session’s expiration time to five minutes from + // now. When disabled, sessions expire after the specified [session length]and the user is + // redirected to the login page, even if recently active. + // + // Default: false + // + // [session length]: https://docs.influxdata.com/influxdb/v2/reference/config-options/#session-length + SessionRenewDisabled *bool + + // Maximum size (in bytes) a shard’s cache can reach before it starts rejecting + // writes. Must be greater than storageCacheSnapShotMemorySize and lower than + // instance’s total memory capacity. We recommend setting it to below 15% of the + // total memory capacity. + // + // Default: 1073741824 + StorageCacheMaxMemorySize *int64 + + // Size (in bytes) at which the storage engine will snapshot the cache and write + // it to a TSM file to make more memory available. Must not be greater than + // storageCacheMaxMemorySize. 
+ // + // Default: 26214400 + StorageCacheSnapshotMemorySize *int64 + + // Duration at which the storage engine will snapshot the cache and write it to a + // new TSM file if the shard hasn’t received writes or deletes. + // + // Default: 10 minutes + StorageCacheSnapshotWriteColdDuration *Duration + + // Duration at which the storage engine will compact all TSM files in a shard if + // it hasn't received writes or deletes. + // + // Default: 4 hours + StorageCompactFullWriteColdDuration *Duration + + // Rate limit (in bytes per second) that TSM compactions can write to disk. + // + // Default: 50331648 + StorageCompactThroughputBurst *int64 + + // Maximum number of full and level compactions that can run concurrently. A value + // of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. Any number greater + // than zero limits compactions to that value. This setting does not apply to cache + // snapshotting. + // + // Default: 0 + StorageMaxConcurrentCompactions *int32 + + // Size (in bytes) at which an index write-ahead log (WAL) file will compact into + // an index file. Lower sizes will cause log files to be compacted more quickly and + // result in lower heap usage at the expense of write throughput. + // + // Default: 1048576 + StorageMaxIndexLogFileSize *int64 + + // Skip field size validation on incoming write requests. + // + // Default: false + StorageNoValidateFieldSize *bool + + // Interval of retention policy enforcement checks. Must be greater than 0. + // + // Default: 30 minutes + StorageRetentionCheckInterval *Duration + + // Maximum number of snapshot compactions that can run concurrently across all + // series partitions in a database. + // + // Default: 0 + StorageSeriesFileMaxConcurrentSnapshotCompactions *int32 + + // Size of the internal cache used in the TSI index to store previously calculated + // series results. 
Cached results are returned quickly rather than needing to be + // recalculated when a subsequent query with the same tag key/value predicate is + // executed. Setting this value to 0 will disable the cache and may decrease query + // performance. + // + // Default: 100 + StorageSeriesIdSetCacheSize *int64 + + // Maximum number writes to the WAL directory to attempt at the same time. Setting + // this value to 0 results in number of processing units available x2. + // + // Default: 0 + StorageWalMaxConcurrentWrites *int32 + + // Maximum amount of time a write request to the WAL directory will wait when the [maximum number of concurrent active writes to the WAL directory has been met] + // . Set to 0 to disable the timeout. + // + // Default: 10 minutes + // + // [maximum number of concurrent active writes to the WAL directory has been met]: https://docs.influxdata.com/influxdb/v2/reference/config-options/#storage-wal-max-concurrent-writes + StorageWalMaxWriteDelay *Duration + // Enable tracing in InfluxDB and specifies the tracing type. Tracing is disabled // by default. TracingType TracingType + // Disable the InfluxDB user interface (UI). The UI is enabled by default. 
+ // + // Default: false + UiDisabled *bool + noSmithyDocumentSerde } diff --git a/service/timestreaminfluxdb/validators.go b/service/timestreaminfluxdb/validators.go index cf18b3f8991..5e046958339 100644 --- a/service/timestreaminfluxdb/validators.go +++ b/service/timestreaminfluxdb/validators.go @@ -226,6 +226,76 @@ func addOpUpdateDbInstanceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpUpdateDbInstance{}, middleware.After) } +func validateDuration(v *types.Duration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Duration"} + if len(v.DurationType) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("DurationType")) + } + if v.Value == nil { + invalidParams.Add(smithy.NewErrParamRequired("Value")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateInfluxDBv2Parameters(v *types.InfluxDBv2Parameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "InfluxDBv2Parameters"} + if v.HttpIdleTimeout != nil { + if err := validateDuration(v.HttpIdleTimeout); err != nil { + invalidParams.AddNested("HttpIdleTimeout", err.(smithy.InvalidParamsError)) + } + } + if v.HttpReadHeaderTimeout != nil { + if err := validateDuration(v.HttpReadHeaderTimeout); err != nil { + invalidParams.AddNested("HttpReadHeaderTimeout", err.(smithy.InvalidParamsError)) + } + } + if v.HttpReadTimeout != nil { + if err := validateDuration(v.HttpReadTimeout); err != nil { + invalidParams.AddNested("HttpReadTimeout", err.(smithy.InvalidParamsError)) + } + } + if v.HttpWriteTimeout != nil { + if err := validateDuration(v.HttpWriteTimeout); err != nil { + invalidParams.AddNested("HttpWriteTimeout", err.(smithy.InvalidParamsError)) + } + } + if v.StorageCacheSnapshotWriteColdDuration != nil { + if err := validateDuration(v.StorageCacheSnapshotWriteColdDuration); err != nil { + 
invalidParams.AddNested("StorageCacheSnapshotWriteColdDuration", err.(smithy.InvalidParamsError)) + } + } + if v.StorageCompactFullWriteColdDuration != nil { + if err := validateDuration(v.StorageCompactFullWriteColdDuration); err != nil { + invalidParams.AddNested("StorageCompactFullWriteColdDuration", err.(smithy.InvalidParamsError)) + } + } + if v.StorageRetentionCheckInterval != nil { + if err := validateDuration(v.StorageRetentionCheckInterval); err != nil { + invalidParams.AddNested("StorageRetentionCheckInterval", err.(smithy.InvalidParamsError)) + } + } + if v.StorageWalMaxWriteDelay != nil { + if err := validateDuration(v.StorageWalMaxWriteDelay); err != nil { + invalidParams.AddNested("StorageWalMaxWriteDelay", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateLogDeliveryConfiguration(v *types.LogDeliveryConfiguration) error { if v == nil { return nil @@ -245,6 +315,25 @@ func validateLogDeliveryConfiguration(v *types.LogDeliveryConfiguration) error { } } +func validateParameters(v types.Parameters) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Parameters"} + switch uv := v.(type) { + case *types.ParametersMemberInfluxDBv2: + if err := validateInfluxDBv2Parameters(&uv.Value); err != nil { + invalidParams.AddNested("[InfluxDBv2]", err.(smithy.InvalidParamsError)) + } + + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateS3Configuration(v *types.S3Configuration) error { if v == nil { return nil @@ -306,6 +395,11 @@ func validateOpCreateDbParameterGroupInput(v *CreateDbParameterGroupInput) error if v.Name == nil { invalidParams.Add(smithy.NewErrParamRequired("Name")) } + if v.Parameters != nil { + if err := validateParameters(v.Parameters); err != nil { + invalidParams.AddNested("Parameters", err.(smithy.InvalidParamsError)) + } + } if invalidParams.Len() > 0 { return 
invalidParams } else { diff --git a/service/verifiedpermissions/api_op_CreateIdentitySource.go b/service/verifiedpermissions/api_op_CreateIdentitySource.go index 48cfcb4c7e1..a0d4bdf6abc 100644 --- a/service/verifiedpermissions/api_op_CreateIdentitySource.go +++ b/service/verifiedpermissions/api_op_CreateIdentitySource.go @@ -35,7 +35,7 @@ import ( // MyCorp::User::us-east-1_EXAMPLE|a1b2c3d4-5678-90ab-cdef-EXAMPLE11111 . // // - OpenID Connect (OIDC) provider: Namespace::[Entity -// type]::[principalIdClaim]|[user principal attribute] , for example +// type]::[entityIdPrefix]|[user principal attribute] , for example // MyCorp::User::MyOIDCProvider|a1b2c3d4-5678-90ab-cdef-EXAMPLE22222 . // // Verified Permissions is [eventually consistent] . It can take a few seconds for a new or changed diff --git a/service/verifiedpermissions/api_op_IsAuthorizedWithToken.go b/service/verifiedpermissions/api_op_IsAuthorizedWithToken.go index b092c0f7117..29b0f105726 100644 --- a/service/verifiedpermissions/api_op_IsAuthorizedWithToken.go +++ b/service/verifiedpermissions/api_op_IsAuthorizedWithToken.go @@ -19,8 +19,6 @@ import ( // policies in the specified policy store. The result of the decision is either // Allow or Deny , along with a list of the policies that resulted in the decision. // -// At this time, Verified Permissions accepts tokens from only Amazon Cognito. -// // Verified Permissions validates each token that is specified in a request by // checking its expiration date and its signature. // diff --git a/service/verifiedpermissions/api_op_UpdateIdentitySource.go b/service/verifiedpermissions/api_op_UpdateIdentitySource.go index 5e1546f26ce..73f37390e0d 100644 --- a/service/verifiedpermissions/api_op_UpdateIdentitySource.go +++ b/service/verifiedpermissions/api_op_UpdateIdentitySource.go @@ -52,11 +52,6 @@ type UpdateIdentitySourceInput struct { // Specifies the details required to communicate with the identity provider (IdP) // associated with this identity source. 
// - // At this time, the only valid member of this structure is a Amazon Cognito user - // pool configuration. - // - // You must specify a userPoolArn , and optionally, a ClientId . - // // This member is required. UpdateConfiguration types.UpdateConfiguration diff --git a/service/verifiedpermissions/types/types.go b/service/verifiedpermissions/types/types.go index ccd389b1f53..a04f33e4e5f 100644 --- a/service/verifiedpermissions/types/types.go +++ b/service/verifiedpermissions/types/types.go @@ -1536,9 +1536,9 @@ type PolicyItem struct { // The type of the policy. This is one of the following values: // - // - static + // - STATIC // - // - templateLinked + // - TEMPLATE_LINKED // // This member is required. PolicyType PolicyType @@ -1656,9 +1656,10 @@ type SchemaDefinition interface { } // A JSON string representation of the schema supported by applications that use -// this policy store. For more information, see [Policy store schema]in the Amazon Verified Permissions -// User Guide. +// this policy store. To delete the schema, run [PutSchema]with {} for this parameter. For +// more information, see [Policy store schema]in the Amazon Verified Permissions User Guide. // +// [PutSchema]: https://docs.aws.amazon.com/verifiedpermissions/latest/apireference/API_PutSchema.html // [Policy store schema]: https://docs.aws.amazon.com/verifiedpermissions/latest/userguide/schema.html type SchemaDefinitionMemberCedarJson struct { Value string