From 654a5c02d30931a224ef337646ba0d48ce50f558 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Mon, 13 Jan 2025 16:35:47 -0800 Subject: [PATCH] vendor: update nydus-snapshotter to v0.15 Signed-off-by: Tonis Tiigi --- go.mod | 42 +- go.sum | 88 +- .../aws/accountid_endpoint_mode.go | 18 + .../aws/aws-sdk-go-v2/aws/config.go | 3 + .../aws/aws-sdk-go-v2/aws/credentials.go | 3 + .../aws/aws-sdk-go-v2/aws/endpoints.go | 26 +- .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/middleware/middleware.go | 10 +- .../aws/middleware/private/metrics/metrics.go | 5 +- .../aws/middleware/request_id_retriever.go | 12 +- .../aws/middleware/user_agent.go | 84 +- .../aws/protocol/eventstream/CHANGELOG.md | 16 + .../eventstream/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/aws/ratelimit/none.go | 20 + .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 45 +- .../aws/retry/retryable_error.go | 21 + .../aws/aws-sdk-go-v2/aws/retry/standard.go | 11 + .../aws/signer/internal/v4/headers.go | 1 - .../aws-sdk-go-v2/aws/signer/v4/middleware.go | 78 +- .../aws/aws-sdk-go-v2/aws/signer/v4/v4.go | 61 +- .../http/response_error_middleware.go | 10 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 115 ++ .../aws/aws-sdk-go-v2/config/config.go | 3 + .../aws/aws-sdk-go-v2/config/env_config.go | 37 + .../config/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/config/load_options.go | 29 +- .../aws/aws-sdk-go-v2/config/provider.go | 17 + .../aws/aws-sdk-go-v2/config/resolve.go | 16 + .../aws/aws-sdk-go-v2/config/shared_config.go | 34 + .../aws-sdk-go-v2/credentials/CHANGELOG.md | 114 ++ .../endpointcreds/internal/client/client.go | 1 + .../credentials/endpointcreds/provider.go | 1 + .../credentials/go_module_metadata.go | 2 +- .../credentials/processcreds/provider.go | 4 + .../ssocreds/sso_credentials_provider.go | 1 + .../stscreds/assume_role_provider.go | 6 + .../stscreds/web_identity_provider.go | 19 + .../feature/ec2/imds/CHANGELOG.md | 70 + .../feature/ec2/imds/api_client.go | 4 + .../aws/aws-sdk-go-v2/feature/ec2/imds/doc.go | 5 +- .../feature/ec2/imds/go_module_metadata.go | 2 +- .../feature/ec2/imds/request_middleware.go | 6 + .../feature/s3/manager/CHANGELOG.md | 143 ++ .../feature/s3/manager/download.go | 5 +- .../feature/s3/manager/go_module_metadata.go | 2 +- .../feature/s3/manager/upload.go | 29 + .../internal/auth/smithy/v4signer_adapter.go | 6 +- .../internal/awsutil/path_value.go | 225 --- .../internal/configsources/CHANGELOG.md | 65 + .../configsources/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/context/context.go | 13 + .../endpoints/awsrulesfn/partition.go | 11 +- .../endpoints/awsrulesfn/partitions.go | 94 +- .../endpoints/awsrulesfn/partitions.json | 6 +- .../internal/endpoints/v2/CHANGELOG.md | 66 + .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 4 + .../internal/ini/go_module_metadata.go | 2 +- .../internal/middleware/middleware.go | 42 + .../aws-sdk-go-v2/internal/v4a/CHANGELOG.md | 66 + .../internal/v4a/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/internal/v4a/smithy.go | 8 +- .../internal/accept-encoding/CHANGELOG.md | 16 + .../accept-encoding/go_module_metadata.go | 2 +- .../service/internal/checksum/CHANGELOG.md | 74 + .../internal/checksum/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 74 + .../service/internal/presigned-url/context.go | 12 +- .../presigned-url/go_module_metadata.go | 2 +- .../service/internal/s3shared/CHANGELOG.md | 65 + .../internal/s3shared/go_module_metadata.go | 2 +- 
.../aws/aws-sdk-go-v2/service/s3/CHANGELOG.md | 128 ++ .../aws-sdk-go-v2/service/s3/api_client.go | 184 ++- .../service/s3/api_op_AbortMultipartUpload.go | 149 +- .../s3/api_op_CompleteMultipartUpload.go | 320 ++-- .../service/s3/api_op_CopyObject.go | 646 +++++--- .../service/s3/api_op_CreateBucket.go | 236 ++- .../s3/api_op_CreateMultipartUpload.go | 622 +++++--- .../service/s3/api_op_CreateSession.go | 112 +- .../service/s3/api_op_DeleteBucket.go | 81 +- ...i_op_DeleteBucketAnalyticsConfiguration.go | 60 +- .../service/s3/api_op_DeleteBucketCors.go | 51 +- .../s3/api_op_DeleteBucketEncryption.go | 61 +- ...teBucketIntelligentTieringConfiguration.go | 69 +- ...i_op_DeleteBucketInventoryConfiguration.go | 62 +- .../s3/api_op_DeleteBucketLifecycle.go | 60 +- ...api_op_DeleteBucketMetricsConfiguration.go | 69 +- .../api_op_DeleteBucketOwnershipControls.go | 48 +- .../service/s3/api_op_DeleteBucketPolicy.go | 120 +- .../s3/api_op_DeleteBucketReplication.go | 64 +- .../service/s3/api_op_DeleteBucketTagging.go | 45 +- .../service/s3/api_op_DeleteBucketWebsite.go | 63 +- .../service/s3/api_op_DeleteObject.go | 190 ++- .../service/s3/api_op_DeleteObjectTagging.go | 77 +- .../service/s3/api_op_DeleteObjects.go | 238 +-- .../s3/api_op_DeletePublicAccessBlock.go | 57 +- ...api_op_GetBucketAccelerateConfiguration.go | 88 +- .../service/s3/api_op_GetBucketAcl.go | 87 +- .../api_op_GetBucketAnalyticsConfiguration.go | 66 +- .../service/s3/api_op_GetBucketCors.go | 87 +- .../service/s3/api_op_GetBucketEncryption.go | 61 +- ...etBucketIntelligentTieringConfiguration.go | 69 +- .../api_op_GetBucketInventoryConfiguration.go | 58 +- .../api_op_GetBucketLifecycleConfiguration.go | 78 +- .../service/s3/api_op_GetBucketLocation.go | 95 +- .../service/s3/api_op_GetBucketLogging.go | 48 +- .../api_op_GetBucketMetricsConfiguration.go | 67 +- ...i_op_GetBucketNotificationConfiguration.go | 95 +- .../s3/api_op_GetBucketOwnershipControls.go | 48 +- .../service/s3/api_op_GetBucketPolicy.go | 149 +- .../s3/api_op_GetBucketPolicyStatus.go | 61 +- .../service/s3/api_op_GetBucketReplication.go | 68 +- .../s3/api_op_GetBucketRequestPayment.go | 40 +- .../service/s3/api_op_GetBucketTagging.go | 49 +- .../service/s3/api_op_GetBucketVersioning.go | 54 +- .../service/s3/api_op_GetBucketWebsite.go | 51 +- .../service/s3/api_op_GetObject.go | 573 ++++--- .../service/s3/api_op_GetObjectAcl.go | 100 +- .../service/s3/api_op_GetObjectAttributes.go | 259 +-- .../service/s3/api_op_GetObjectLegalHold.go | 66 +- .../s3/api_op_GetObjectLockConfiguration.go | 58 +- .../service/s3/api_op_GetObjectRetention.go | 66 +- .../service/s3/api_op_GetObjectTagging.go | 85 +- .../service/s3/api_op_GetObjectTorrent.go | 64 +- .../service/s3/api_op_GetPublicAccessBlock.go | 68 +- .../service/s3/api_op_HeadBucket.go | 255 +-- .../service/s3/api_op_HeadObject.go | 597 ++++--- ...pi_op_ListBucketAnalyticsConfigurations.go | 73 +- ...tBucketIntelligentTieringConfigurations.go | 69 +- ...pi_op_ListBucketInventoryConfigurations.go | 72 +- .../api_op_ListBucketMetricsConfigurations.go | 82 +- .../service/s3/api_op_ListBuckets.go | 36 +- .../service/s3/api_op_ListDirectoryBuckets.go | 80 +- .../service/s3/api_op_ListMultipartUploads.go | 275 ++-- .../service/s3/api_op_ListObjectVersions.go | 84 +- .../service/s3/api_op_ListObjects.go | 159 +- .../service/s3/api_op_ListObjectsV2.go | 265 ++-- .../service/s3/api_op_ListParts.go | 279 ++-- ...api_op_PutBucketAccelerateConfiguration.go | 86 +- .../service/s3/api_op_PutBucketAcl.go | 245 ++- 
.../api_op_PutBucketAnalyticsConfiguration.go | 90 +- .../service/s3/api_op_PutBucketCors.go | 98 +- .../service/s3/api_op_PutBucketEncryption.go | 102 +- ...utBucketIntelligentTieringConfiguration.go | 93 +- .../api_op_PutBucketInventoryConfiguration.go | 122 +- .../api_op_PutBucketLifecycleConfiguration.go | 115 +- .../service/s3/api_op_PutBucketLogging.go | 133 +- .../api_op_PutBucketMetricsConfiguration.go | 73 +- ...i_op_PutBucketNotificationConfiguration.go | 108 +- .../s3/api_op_PutBucketOwnershipControls.go | 55 +- .../service/s3/api_op_PutBucketPolicy.go | 184 ++- .../service/s3/api_op_PutBucketReplication.go | 140 +- .../s3/api_op_PutBucketRequestPayment.go | 68 +- .../service/s3/api_op_PutBucketTagging.go | 107 +- .../service/s3/api_op_PutBucketVersioning.go | 107 +- .../service/s3/api_op_PutBucketWebsite.go | 96 +- .../service/s3/api_op_PutObject.go | 521 ++++--- .../service/s3/api_op_PutObjectAcl.go | 303 ++-- .../service/s3/api_op_PutObjectLegalHold.go | 73 +- .../s3/api_op_PutObjectLockConfiguration.go | 75 +- .../service/s3/api_op_PutObjectRetention.go | 89 +- .../service/s3/api_op_PutObjectTagging.go | 129 +- .../service/s3/api_op_PutPublicAccessBlock.go | 93 +- .../service/s3/api_op_RestoreObject.go | 244 +-- .../service/s3/api_op_SelectObjectContent.go | 163 +- .../service/s3/api_op_UploadPart.go | 355 +++-- .../service/s3/api_op_UploadPartCopy.go | 417 +++-- .../s3/api_op_WriteGetObjectResponse.go | 177 ++- .../aws/aws-sdk-go-v2/service/s3/auth.go | 14 +- .../aws-sdk-go-v2/service/s3/deserializers.go | 31 +- .../service/s3/endpoint_auth_resolver.go | 11 + .../aws/aws-sdk-go-v2/service/s3/endpoints.go | 113 +- .../service/s3/express_resolve.go | 9 +- .../service/s3/express_user_agent.go | 43 + .../aws-sdk-go-v2/service/s3/generated.json | 4 +- .../service/s3/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/s3/options.go | 34 +- .../aws-sdk-go-v2/service/s3/serializers.go | 36 +- .../aws-sdk-go-v2/service/s3/types/enums.go | 292 ++-- .../aws-sdk-go-v2/service/s3/types/errors.go | 19 +- .../aws-sdk-go-v2/service/s3/types/types.go | 1386 ++++++++++++----- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 97 ++ .../aws-sdk-go-v2/service/sso/api_client.go | 188 ++- .../service/sso/api_op_GetRoleCredentials.go | 25 +- .../service/sso/api_op_ListAccountRoles.go | 44 +- .../service/sso/api_op_ListAccounts.go | 49 +- .../service/sso/api_op_Logout.go | 49 +- .../aws/aws-sdk-go-v2/service/sso/auth.go | 8 +- .../service/sso/deserializers.go | 10 + .../aws/aws-sdk-go-v2/service/sso/doc.go | 22 +- .../aws-sdk-go-v2/service/sso/endpoints.go | 28 +- .../aws-sdk-go-v2/service/sso/generated.json | 4 +- .../service/sso/go_module_metadata.go | 2 +- .../sso/internal/endpoints/endpoints.go | 32 + .../aws/aws-sdk-go-v2/service/sso/options.go | 34 +- .../aws-sdk-go-v2/service/sso/types/types.go | 20 +- .../service/ssooidc/CHANGELOG.md | 97 ++ .../service/ssooidc/api_client.go | 188 ++- .../service/ssooidc/api_op_CreateToken.go | 74 +- .../ssooidc/api_op_CreateTokenWithIAM.go | 84 +- .../service/ssooidc/api_op_RegisterClient.go | 37 +- .../api_op_StartDeviceAuthorization.go | 29 +- .../aws/aws-sdk-go-v2/service/ssooidc/auth.go | 8 +- .../service/ssooidc/deserializers.go | 101 ++ .../aws/aws-sdk-go-v2/service/ssooidc/doc.go | 40 +- .../service/ssooidc/endpoints.go | 28 +- .../service/ssooidc/generated.json | 4 +- .../service/ssooidc/go_module_metadata.go | 2 +- .../ssooidc/internal/endpoints/endpoints.go | 32 + .../aws-sdk-go-v2/service/ssooidc/options.go | 34 +- 
.../service/ssooidc/serializers.go | 56 + .../service/ssooidc/types/errors.go | 32 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 98 ++ .../aws-sdk-go-v2/service/sts/api_client.go | 190 ++- .../service/sts/api_op_AssumeRole.go | 471 +++--- .../service/sts/api_op_AssumeRoleWithSAML.go | 385 +++-- .../sts/api_op_AssumeRoleWithWebIdentity.go | 399 +++-- .../sts/api_op_DecodeAuthorizationMessage.go | 66 +- .../service/sts/api_op_GetAccessKeyInfo.go | 70 +- .../service/sts/api_op_GetCallerIdentity.go | 46 +- .../service/sts/api_op_GetFederationToken.go | 330 ++-- .../service/sts/api_op_GetSessionToken.go | 125 +- .../aws/aws-sdk-go-v2/service/sts/auth.go | 8 +- .../service/sts/deserializers.go | 9 + .../aws/aws-sdk-go-v2/service/sts/doc.go | 12 +- .../aws-sdk-go-v2/service/sts/endpoints.go | 28 +- .../aws-sdk-go-v2/service/sts/generated.json | 4 +- .../service/sts/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/sts/options.go | 34 +- .../aws-sdk-go-v2/service/sts/types/errors.go | 26 +- .../aws-sdk-go-v2/service/sts/types/types.go | 50 +- vendor/github.com/aws/smithy-go/.gitignore | 3 + vendor/github.com/aws/smithy-go/CHANGELOG.md | 23 + vendor/github.com/aws/smithy-go/Makefile | 7 +- .../aws/smithy-go/go_module_metadata.go | 2 +- vendor/github.com/aws/smithy-go/modman.toml | 1 - .../github.com/containerd/containerd/LICENSE | 191 --- .../github.com/containerd/containerd/NOTICE | 16 - .../archive/compression/compression.go | 323 ---- .../archive/compression/compression_fuzzer.go | 28 - .../containerd/archive/link_default.go | 25 - .../containerd/archive/link_freebsd.go | 82 - .../containerd/containerd/archive/tar.go | 823 ---------- .../containerd/archive/tar_freebsd.go | 46 - .../containerd/archive/tar_mostunix.go | 55 - .../containerd/containerd/archive/tar_opts.go | 111 -- .../containerd/archive/tar_opts_linux.go | 57 - .../containerd/archive/tar_opts_windows.go | 70 - .../containerd/containerd/archive/tar_unix.go | 203 --- .../containerd/archive/tar_windows.go | 107 -- .../containerd/archive/tarheader/tarheader.go | 82 - .../archive/tarheader/tarheader_unix.go | 59 - .../containerd/containerd/archive/time.go | 54 - .../containerd/archive/time_unix.go | 38 - .../containerd/archive/time_windows.go | 41 - .../containerd/containerd/content/adaptor.go | 52 - .../containerd/containerd/content/content.go | 207 --- .../containerd/containerd/content/helpers.go | 340 ---- .../content/local/content_local_fuzzer.go | 76 - .../containerd/content/local/locks.go | 62 - .../containerd/content/local/readerat.go | 72 - .../containerd/content/local/store.go | 721 --------- .../containerd/content/local/store_bsd.go | 33 - .../containerd/content/local/store_openbsd.go | 33 - .../containerd/content/local/store_unix.go | 33 - .../containerd/content/local/store_windows.go | 26 - .../containerd/content/local/test_helper.go | 38 - .../containerd/content/local/writer.go | 209 --- .../containerd/containerd/errdefs/errors.go | 72 - .../containerd/containerd/errdefs/grpc.go | 147 -- .../containerd/containerd/filters/adaptor.go | 33 - .../containerd/containerd/filters/filter.go | 178 --- .../containerd/containerd/filters/parser.go | 290 ---- .../containerd/containerd/filters/quote.go | 252 --- .../containerd/containerd/filters/scanner.go | 297 ---- .../containerd/images/annotations.go | 23 - .../containerd/images/converter/converter.go | 126 -- .../containerd/images/converter/default.go | 454 ------ .../containerd/containerd/images/diffid.go | 81 - .../containerd/containerd/images/handlers.go | 323 ---- 
.../containerd/containerd/images/image.go | 444 ------ .../containerd/images/importexport.go | 37 - .../containerd/containerd/images/labels.go | 21 - .../containerd/images/mediatypes.go | 215 --- .../containerd/containerd/labels/labels.go | 29 - .../containerd/containerd/labels/validate.go | 41 - .../containerd/containerd/leases/context.go | 40 - .../containerd/containerd/leases/grpc.go | 58 - .../containerd/containerd/leases/id.go | 43 - .../containerd/containerd/leases/lease.go | 91 -- .../containerd/pkg/epoch/context.go | 41 - .../containerd/containerd/pkg/epoch/epoch.go | 69 - .../containerd/pkg/randutil/randutil.go | 48 - .../{ => v2}/pkg/snapshotters/annotations.go | 7 +- .../pkg/converter/convert_unix.go | 14 +- .../pkg/converter/convert_windows.go | 4 +- .../pkg/converter/cs_proxy_unix.go | 2 +- .../pkg/converter/tool/builder.go | 28 +- .../nydus-snapshotter/pkg/converter/types.go | 2 +- .../nydus-snapshotter/pkg/converter/utils.go | 2 +- .../nydus-snapshotter/pkg/label/label.go | 2 +- .../jmespath/go-jmespath/.gitignore | 4 - .../jmespath/go-jmespath/.travis.yml | 28 - .../github.com/jmespath/go-jmespath/LICENSE | 13 - .../github.com/jmespath/go-jmespath/Makefile | 51 - .../github.com/jmespath/go-jmespath/README.md | 87 -- vendor/github.com/jmespath/go-jmespath/api.go | 49 - .../go-jmespath/astnodetype_string.go | 16 - .../jmespath/go-jmespath/functions.go | 842 ---------- .../jmespath/go-jmespath/interpreter.go | 418 ----- .../github.com/jmespath/go-jmespath/lexer.go | 420 ----- .../github.com/jmespath/go-jmespath/parser.go | 603 ------- .../jmespath/go-jmespath/toktype_string.go | 16 - .../github.com/jmespath/go-jmespath/util.go | 185 --- vendor/modules.txt | 101 +- 315 files changed, 14758 insertions(+), 17388 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go delete mode 100644 vendor/github.com/containerd/containerd/LICENSE delete mode 100644 vendor/github.com/containerd/containerd/NOTICE delete mode 100644 vendor/github.com/containerd/containerd/archive/compression/compression.go delete mode 100644 vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go delete mode 100644 vendor/github.com/containerd/containerd/archive/link_default.go delete mode 100644 vendor/github.com/containerd/containerd/archive/link_freebsd.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tar.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tar_freebsd.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tar_mostunix.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tar_opts.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tar_opts_linux.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tar_opts_windows.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tar_unix.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tar_windows.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tarheader/tarheader.go delete mode 100644 vendor/github.com/containerd/containerd/archive/tarheader/tarheader_unix.go delete mode 100644 
vendor/github.com/containerd/containerd/archive/time.go delete mode 100644 vendor/github.com/containerd/containerd/archive/time_unix.go delete mode 100644 vendor/github.com/containerd/containerd/archive/time_windows.go delete mode 100644 vendor/github.com/containerd/containerd/content/adaptor.go delete mode 100644 vendor/github.com/containerd/containerd/content/content.go delete mode 100644 vendor/github.com/containerd/containerd/content/helpers.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/locks.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/readerat.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store_bsd.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store_openbsd.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store_unix.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store_windows.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/test_helper.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/writer.go delete mode 100644 vendor/github.com/containerd/containerd/errdefs/errors.go delete mode 100644 vendor/github.com/containerd/containerd/errdefs/grpc.go delete mode 100644 vendor/github.com/containerd/containerd/filters/adaptor.go delete mode 100644 vendor/github.com/containerd/containerd/filters/filter.go delete mode 100644 vendor/github.com/containerd/containerd/filters/parser.go delete mode 100644 vendor/github.com/containerd/containerd/filters/quote.go delete mode 100644 vendor/github.com/containerd/containerd/filters/scanner.go delete mode 100644 vendor/github.com/containerd/containerd/images/annotations.go delete mode 100644 vendor/github.com/containerd/containerd/images/converter/converter.go delete mode 100644 vendor/github.com/containerd/containerd/images/converter/default.go delete mode 100644 vendor/github.com/containerd/containerd/images/diffid.go delete mode 100644 vendor/github.com/containerd/containerd/images/handlers.go delete mode 100644 vendor/github.com/containerd/containerd/images/image.go delete mode 100644 vendor/github.com/containerd/containerd/images/importexport.go delete mode 100644 vendor/github.com/containerd/containerd/images/labels.go delete mode 100644 vendor/github.com/containerd/containerd/images/mediatypes.go delete mode 100644 vendor/github.com/containerd/containerd/labels/labels.go delete mode 100644 vendor/github.com/containerd/containerd/labels/validate.go delete mode 100644 vendor/github.com/containerd/containerd/leases/context.go delete mode 100644 vendor/github.com/containerd/containerd/leases/grpc.go delete mode 100644 vendor/github.com/containerd/containerd/leases/id.go delete mode 100644 vendor/github.com/containerd/containerd/leases/lease.go delete mode 100644 vendor/github.com/containerd/containerd/pkg/epoch/context.go delete mode 100644 vendor/github.com/containerd/containerd/pkg/epoch/epoch.go delete mode 100644 vendor/github.com/containerd/containerd/pkg/randutil/randutil.go rename vendor/github.com/containerd/containerd/{ => v2}/pkg/snapshotters/annotations.go (95%) delete mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore delete mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml delete mode 100644 
vendor/github.com/jmespath/go-jmespath/LICENSE delete mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile delete mode 100644 vendor/github.com/jmespath/go-jmespath/README.md delete mode 100644 vendor/github.com/jmespath/go-jmespath/api.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/util.go diff --git a/go.mod b/go.mod index d9a479904380..03da6fb38e8b 100644 --- a/go.mod +++ b/go.mod @@ -9,11 +9,11 @@ require ( github.com/Microsoft/hcsshim v0.12.9 github.com/agext/levenshtein v1.2.3 github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 - github.com/aws/aws-sdk-go-v2 v1.24.1 - github.com/aws/aws-sdk-go-v2/config v1.26.6 - github.com/aws/aws-sdk-go-v2/credentials v1.16.16 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 - github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 + github.com/aws/aws-sdk-go-v2 v1.30.3 + github.com/aws/aws-sdk-go-v2/config v1.27.27 + github.com/aws/aws-sdk-go-v2/credentials v1.17.27 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 + github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 github.com/containerd/console v1.0.4 github.com/containerd/containerd/api v1.8.0 github.com/containerd/containerd/v2 v2.0.1 @@ -23,7 +23,7 @@ require ( github.com/containerd/go-cni v1.1.11 github.com/containerd/go-runc v1.1.0 github.com/containerd/log v0.1.0 - github.com/containerd/nydus-snapshotter v0.14.0 + github.com/containerd/nydus-snapshotter v0.15.0 github.com/containerd/platforms v1.0.0-rc.0 github.com/containerd/stargz-snapshotter v0.16.3 github.com/containerd/stargz-snapshotter/estargz v0.16.3 @@ -116,25 +116,24 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect - github.com/aws/smithy-go v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect + 
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect + github.com/aws/smithy-go v1.20.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups/v3 v3.0.3 // indirect - github.com/containerd/containerd v1.7.24 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/fifo v1.1.0 // indirect github.com/containerd/plugin v1.0.0 // indirect @@ -159,7 +158,6 @@ require ( github.com/hanwen/go-fuse/v2 v2.6.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/moby/sys/mount v0.3.4 // indirect github.com/moby/sys/sequential v0.6.0 // indirect diff --git a/go.sum b/go.sum index bd49bf707963..69b5396aa6cd 100644 --- a/go.sum +++ b/go.sum @@ -29,44 +29,44 @@ github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1 github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= -github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= -github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 h1:2MUXyGW6dVaQz6aqycpbdLIH1NMcUI6kW6vQ0RabGYg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15/go.mod h1:aHbhbR6WEQgHAiRj41EQ2W47yOYwNtIkWTXmcAtYqj8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 
h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 h1:L0ai8WICYHozIKK+OtPzVJBugL7culcuM4E4JOpIEm8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10/go.mod h1:byqfyxJBshFk0fF9YmK0M0ugIO8OWjzH2T3bPG4eGuA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI= -github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 h1:5XNlsBsEvBZBMO6p82y+sqpWg8j5aBCe+5C2GBFgqBQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= +github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90= +github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= 
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 h1:u1KOU1S15ufyZqmH/rA3POkiRH6EcDANHj2xHRzq+zc= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8/go.mod h1:WPv2FRnkIOoDv/8j2gSUsI4qDc7392w5anFB/I89GZ8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -89,8 +89,6 @@ github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGD github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0= github.com/containerd/console v1.0.4 
h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= -github.com/containerd/containerd v1.7.24 h1:zxszGrGjrra1yYJW/6rhm9cJ1ZQ8rkKBR48brqsa7nA= -github.com/containerd/containerd v1.7.24/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= github.com/containerd/containerd/v2 v2.0.1 h1:xqSar9cjkGhfQ2YvanCu7FMLk6+pNCFMCAroM2ALPp0= @@ -111,8 +109,8 @@ github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gG github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/nydus-snapshotter v0.14.0 h1:6/eAi6d7MjaeLLuMO8Udfe5GVsDudmrDNO4SGETMBco= -github.com/containerd/nydus-snapshotter v0.14.0/go.mod h1:TT4jv2SnIDxEBu4H2YOvWQHPOap031ydTaHTuvc5VQk= +github.com/containerd/nydus-snapshotter v0.15.0 h1:RqZRs1GPeM6T3wmuxJV9u+2Rg4YETVMwTmiDeX+iWC8= +github.com/containerd/nydus-snapshotter v0.15.0/go.mod h1:biq0ijpeZe0I5yZFSJyHzFSjjRZQ7P7y/OuHyd7hYOw= github.com/containerd/platforms v1.0.0-rc.0 h1:GuHWSKgVVO3POn6nRBB4sH63uPOLa87yuuhsGLWaXAA= github.com/containerd/platforms v1.0.0-rc.0/go.mod h1:T1XAzzOdYs3it7l073MNXyxRwQofJfqwi/8cRjufIk4= github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y= @@ -244,10 +242,6 @@ github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyf github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -568,8 +562,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go new file mode 100644 index 000000000000..6504a21864cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go @@ -0,0 +1,18 @@ +package aws + +// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing. +type AccountIDEndpointMode string + +const ( + // AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing + AccountIDEndpointModeUnset AccountIDEndpointMode = "" + + // AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present + AccountIDEndpointModePreferred = "preferred" + + // AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity + AccountIDEndpointModeRequired = "required" + + // AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing + AccountIDEndpointModeDisabled = "disabled" +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index 2264200c169d..16000d792790 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -162,6 +162,9 @@ type Config struct { // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or // the shared config profile attribute request_min_compression_size_bytes RequestMinCompressSizeBytes int64 + + // Controls how a resolved AWS account ID is handled for endpoint routing. + AccountIDEndpointMode AccountIDEndpointMode } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go index 714d4ad85cb6..98ba77056427 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -90,6 +90,9 @@ type Credentials struct { // The time the credentials will expire at. Should be ignored if CanExpire // is false. Expires time.Time + + // The ID of the account for the credentials. + AccountID string } // Expired returns if the credentials have expired. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go index aa10a9b40f0d..99edbf3ee634 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go @@ -70,6 +70,10 @@ func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found // The SDK will automatically resolve these endpoints per API client using an // internal endpoint resolvers. If you'd like to provide custom endpoint // resolving behavior you can implement the EndpointResolver interface. +// +// Deprecated: This structure was used with the global [EndpointResolver] +// interface, which has been deprecated in favor of service-specific endpoint +// resolution. See the deprecation docs on that interface for more information. type Endpoint struct { // The base URL endpoint the SDK API clients will use to make API calls to. // The SDK will suffix URI path and query elements to this endpoint. @@ -124,6 +128,8 @@ type Endpoint struct { } // EndpointSource is the endpoint source type. +// +// Deprecated: The global [Endpoint] structure is deprecated. 
type EndpointSource int const ( @@ -161,19 +167,25 @@ func (e *EndpointNotFoundError) Unwrap() error { // API clients will fallback to attempting to resolve the endpoint using its // internal default endpoint resolver. // -// Deprecated: See EndpointResolverWithOptions +// Deprecated: The global endpoint resolution interface is deprecated. The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Setting a value for +// EndpointResolver on aws.Config or service client options will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. type EndpointResolver interface { ResolveEndpoint(service, region string) (Endpoint, error) } // EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface. // -// Deprecated: See EndpointResolverWithOptionsFunc +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverFunc func(service, region string) (Endpoint, error) // ResolveEndpoint calls the wrapped function and returns the results. -// -// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) { return e(service, region) } @@ -184,11 +196,17 @@ func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, // available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error, // API clients will fallback to attempting to resolve the endpoint using its // internal default endpoint resolver. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverWithOptions interface { ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error) } // EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [EndpointResolver]. type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error) // ResolveEndpoint calls the wrapped function and returns the results. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 66d09630308e..ba898a1a85f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.24.1" +const goModuleVersion = "1.30.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go index 9bd0dfb15086..6d5f0079c2f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go @@ -139,16 +139,16 @@ func AddRecordResponseTiming(stack *middleware.Stack) error { // raw response within the response metadata. 
type rawResponseKey struct{} -// addRawResponse middleware adds raw response on to the metadata -type addRawResponse struct{} +// AddRawResponse middleware adds raw response on to the metadata +type AddRawResponse struct{} // ID the identifier for the ClientRequestID -func (m *addRawResponse) ID() string { +func (m *AddRawResponse) ID() string { return "AddRawResponseToMetadata" } // HandleDeserialize adds raw response on the middleware metadata -func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -159,7 +159,7 @@ func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.Des // AddRawResponseToMetadata adds middleware to the middleware stack that // store raw response on to the metadata. func AddRawResponseToMetadata(stack *middleware.Stack) error { - return stack.Deserialize.Add(&addRawResponse{}, middleware.Before) + return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before) } // GetRawResponse returns raw response set on metadata diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go index b0133f4c88d0..19d6107c4615 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go @@ -112,6 +112,8 @@ type MetricData struct { ResolveEndpointStartTime time.Time ResolveEndpointEndTime time.Time EndpointResolutionDuration time.Duration + GetIdentityStartTime time.Time + GetIdentityEndTime time.Time InThroughput float64 OutThroughput float64 RetryCount int @@ -122,6 +124,7 @@ type MetricData struct { OperationName string PartitionID string Region string + UserAgent string RequestContentLength int64 Stream StreamMetrics Attempts []AttemptMetrics @@ -144,8 +147,6 @@ type AttemptMetrics struct { ConnRequestedTime time.Time ConnObtainedTime time.Time ConcurrencyAcquireDuration time.Duration - CredentialFetchStartTime time.Time - CredentialFetchEndTime time.Time SignStartTime time.Time SignEndTime time.Time SigningDuration time.Duration diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go index 7ce48c611cd1..e7d268c3da54 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go @@ -11,18 +11,22 @@ import ( func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error { // add error wrapper middleware before operation deserializers so that it can wrap the error response // returned by operation deserializers - return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before) + return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before) } -type requestIDRetriever struct { +// RequestIDRetriever middleware captures the AWS service request ID from the +// raw response. 
+type RequestIDRetriever struct { } // ID returns the middleware identifier -func (m *requestIDRetriever) ID() string { +func (m *RequestIDRetriever) ID() string { return "RequestIDRetriever" } -func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +// HandleDeserialize pulls the AWS request ID from the response, storing it in +// operation metadata. +func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index af3447ddc980..ff0bc921f1bf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "runtime" + "sort" "strings" "github.com/aws/aws-sdk-go-v2/aws" @@ -30,6 +31,7 @@ const ( FrameworkMetadata AdditionalMetadata ApplicationIdentifier + FeatureMetadata2 ) func (k SDKAgentKeyType) string() string { @@ -50,6 +52,8 @@ func (k SDKAgentKeyType) string() string { return "lib" case ApplicationIdentifier: return "app" + case FeatureMetadata2: + return "m" case AdditionalMetadata: fallthrough default: @@ -64,12 +68,32 @@ var validChars = map[rune]bool{ '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true, } -// requestUserAgent is a build middleware that set the User-Agent for the request. -type requestUserAgent struct { +// UserAgentFeature enumerates tracked SDK features. +type UserAgentFeature string + +// Enumerates UserAgentFeature. +const ( + UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types) + UserAgentFeatureWaiter = "B" + UserAgentFeaturePaginator = "C" + UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard) + UserAgentFeatureRetryModeStandard = "E" + UserAgentFeatureRetryModeAdaptive = "F" + UserAgentFeatureS3Transfer = "G" + UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external) + UserAgentFeatureS3CryptoV2 = "I" // n/a + UserAgentFeatureS3ExpressBucket = "J" + UserAgentFeatureS3AccessGrants = "K" // not yet implemented + UserAgentFeatureGZIPRequestCompression = "L" +) + +// RequestUserAgent is a build middleware that set the User-Agent for the request. +type RequestUserAgent struct { sdkAgent, userAgent *smithyhttp.UserAgentBuilder + features map[UserAgentFeature]struct{} } -// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the +// NewRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the // request. 
// // User-Agent example: @@ -79,14 +103,15 @@ type requestUserAgent struct { // X-Amz-User-Agent example: // // aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 -func newRequestUserAgent() *requestUserAgent { +func NewRequestUserAgent() *RequestUserAgent { userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() addProductName(userAgent) addProductName(sdkAgent) - r := &requestUserAgent{ + r := &RequestUserAgent{ sdkAgent: sdkAgent, userAgent: userAgent, + features: map[UserAgentFeature]struct{}{}, } addSDKMetadata(r) @@ -94,7 +119,7 @@ func newRequestUserAgent() *requestUserAgent { return r } -func addSDKMetadata(r *requestUserAgent) { +func addSDKMetadata(r *RequestUserAgent) { r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName()) r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion) r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS) @@ -162,18 +187,18 @@ func AddRequestUserAgentMiddleware(stack *middleware.Stack) error { return err } -func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) { - id := (*requestUserAgent)(nil).ID() +func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) { + id := (*RequestUserAgent)(nil).ID() bm, ok := stack.Build.Get(id) if !ok { - bm = newRequestUserAgent() + bm = NewRequestUserAgent() err := stack.Build.Add(bm, middleware.After) if err != nil { return nil, err } } - requestUserAgent, ok := bm.(*requestUserAgent) + requestUserAgent, ok := bm.(*RequestUserAgent) if !ok { return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id) } @@ -182,34 +207,40 @@ func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error } // AddUserAgentKey adds the component identified by name to the User-Agent string. -func (u *requestUserAgent) AddUserAgentKey(key string) { +func (u *RequestUserAgent) AddUserAgentKey(key string) { u.userAgent.AddKey(strings.Map(rules, key)) } // AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) { +func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) { u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value)) } -// AddUserAgentKey adds the component identified by name to the User-Agent string. -func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { +// AddUserAgentFeature adds the feature ID to the tracking list to be emitted +// in the final User-Agent string. +func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) { + u.features[feature] = struct{}{} +} + +// AddSDKAgentKey adds the component identified by name to the User-Agent string. +func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) { // TODO: should target sdkAgent u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key)) } -// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string. -func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) { +// AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string. +func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) { // TODO: should target sdkAgent u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value)) } // ID the name of the middleware. 
-func (u *requestUserAgent) ID() string { +func (u *RequestUserAgent) ID() string { return "UserAgent" } // HandleBuild adds or appends the constructed user agent to the request. -func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( +func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( out middleware.BuildOutput, metadata middleware.Metadata, err error, ) { switch req := in.Request.(type) { @@ -224,12 +255,15 @@ func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildI return next.HandleBuild(ctx, in) } -func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { +func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) { const userAgent = "User-Agent" updateHTTPHeader(request, userAgent, u.userAgent.Build()) + if len(u.features) > 0 { + updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features)) + } } -func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { +func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) { const sdkAgent = "X-Amz-User-Agent" updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build()) } @@ -259,3 +293,13 @@ func rules(r rune) rune { return '-' } } + +func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string { + fs := make([]string, 0, len(features)) + for f := range features { + fs = append(fs, string(f)) + } + + sort.Strings(fs) + return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ",")) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md index 1e1da56b3b9e..7df617668f87 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -1,3 +1,19 @@ +# v1.6.3 (2024-06-28) + +* No change notes available for this release. + +# v1.6.2 (2024-03-29) + +* No change notes available for this release. + +# v1.6.1 (2024-02-21) + +* No change notes available for this release. + +# v1.6.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. + # v1.5.4 (2023-12-07) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go index 6759e90e99f8..382f886a92d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -3,4 +3,4 @@ package eventstream // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.5.4" +const goModuleVersion = "1.6.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go new file mode 100644 index 000000000000..8c78364105bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go @@ -0,0 +1,20 @@ +package ratelimit + +import "context" + +// None implements a no-op rate limiter which effectively disables client-side +// rate limiting (also known as "retry quotas"). +// +// GetToken does nothing and always returns a nil error. The returned +// token-release function does nothing, and always returns a nil error. 
+// +// AddTokens does nothing and always returns a nil error. +var None = &none{} + +type none struct{} + +func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) { + return func() error { return nil }, nil +} + +func (*none) AddTokens(v uint) error { return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index dc703d482d25..b645fbdf1326 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -2,12 +2,15 @@ package retry import ( "context" + "errors" "fmt" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" "strconv" "strings" "time" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/aws" awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/internal/sdk" @@ -39,6 +42,10 @@ type Attempt struct { requestCloner RequestCloner } +// define the threshold at which we will consider certain kind of errors to be probably +// caused by clock skew +const skewThreshold = 4 * time.Minute + // NewAttemptMiddleware returns a new Attempt retry middleware. func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt { m := &Attempt{ @@ -86,6 +93,9 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn AttemptClockSkew: attemptClockSkew, }) + // Setting clock skew to be used on other context (like signing) + ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew) + var attemptResult AttemptResult out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) @@ -185,6 +195,8 @@ func (r *Attempt) handleAttempt( return out, attemptResult, nopRelease, err } + err = wrapAsClockSkew(ctx, err) + //------------------------------ // Is Retryable and Should Retry //------------------------------ @@ -247,6 +259,37 @@ func (r *Attempt) handleAttempt( return out, attemptResult, releaseRetryToken, err } +// errors that, if detected when we know there's a clock skew, +// can be retried and have a high chance of success +var possibleSkewCodes = map[string]struct{}{ + "InvalidSignatureException": {}, + "SignatureDoesNotMatch": {}, + "AuthFailure": {}, +} + +var definiteSkewCodes = map[string]struct{}{ + "RequestExpired": {}, + "RequestInTheFuture": {}, + "RequestTimeTooSkewed": {}, +} + +// wrapAsClockSkew checks if this error could be related to a clock skew +// error and if so, wrap the error. 
+func wrapAsClockSkew(ctx context.Context, err error) error { + var v interface{ ErrorCode() string } + if !errors.As(err, &v) { + return err + } + if _, ok := definiteSkewCodes[v.ErrorCode()]; ok { + return &retryableClockSkewError{Err: err} + } + _, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()] + if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode { + return &retryableClockSkewError{Err: err} + } + return err +} + // MetricsHeader attaches SDK request metric header for retries to the transport type MetricsHeader struct{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go index 987affdde6fa..acd8d1cc3d66 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go @@ -2,6 +2,7 @@ package retry import ( "errors" + "fmt" "net" "net/url" "strings" @@ -199,3 +200,23 @@ func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary { return aws.TrueTernary } + +// retryableClockSkewError marks errors that can be caused by clock skew +// (difference between server time and client time). +// This is returned when there's certain confidence that adjusting the client time +// could allow a retry to succeed +type retryableClockSkewError struct{ Err error } + +func (e *retryableClockSkewError) Error() string { + return fmt.Sprintf("Probable clock skew error: %v", e.Err) +} + +// Unwrap returns the wrapped error. +func (e *retryableClockSkewError) Unwrap() error { + return e.Err +} + +// RetryableError allows the retryer to retry this request +func (e *retryableClockSkewError) RetryableError() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go index 25abffc81289..d5ea93222ed1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go @@ -123,6 +123,17 @@ type StandardOptions struct { // Provides the rate limiting strategy for rate limiting attempt retries // across all attempts the retryer is being used with. + // + // A RateLimiter operates as a token bucket with a set capacity, where + // attempt failures events consume tokens. A retry attempt that attempts to + // consume more tokens than what's available results in operation failure. + // The default implementation is parameterized as follows: + // - a capacity of 500 (DefaultRetryRateTokens) + // - a retry caused by a timeout costs 10 tokens (DefaultRetryCost) + // - a retry caused by other errors costs 5 tokens (DefaultRetryTimeoutCost) + // - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement) + // + // You can disable rate limiting by setting this field to ratelimit.None. RateLimiter RateLimiter // The cost to deduct from the RateLimiter's token bucket per retry. 
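Note: the expanded `RateLimiter` documentation above says client-side rate limiting can be switched off by assigning the new `ratelimit.None` value. A minimal sketch of how a caller would wire that up, assuming the usual `config`/`retry` entry points (this snippet is illustrative and not part of the patch):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/ratelimit"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Replace the standard retryer's token bucket with the no-op limiter,
	// which disables client-side retry quotas entirely.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRetryer(func() aws.Retryer {
			return retry.NewStandard(func(o *retry.StandardOptions) {
				o.RateLimiter = ratelimit.None
			})
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // pass cfg to a service client, e.g. s3.NewFromConfig(cfg)
}
```
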
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go index ca738f234b3b..71b1a3521718 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -38,7 +38,6 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go index f39a369ad84d..a9db6433de97 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -11,7 +11,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/internal/sdk" @@ -85,12 +84,12 @@ func (m *dynamicPayloadSigningMiddleware) HandleFinalize( } if req.IsHTTPS() { - return (&unsignedPayload{}).HandleFinalize(ctx, in, next) + return (&UnsignedPayload{}).HandleFinalize(ctx, in, next) } - return (&computePayloadSHA256{}).HandleFinalize(ctx, in, next) + return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next) } -// unsignedPayload sets the SigV4 request payload hash to unsigned. +// UnsignedPayload sets the SigV4 request payload hash to unsigned. // // Will not set the Unsigned Payload magic SHA value, if a SHA has already been // stored in the context. (e.g. application pre-computed SHA256 before making @@ -98,21 +97,21 @@ func (m *dynamicPayloadSigningMiddleware) HandleFinalize( // // This middleware does not check the X-Amz-Content-Sha256 header, if that // header is serialized a middleware must translate it into the context. -type unsignedPayload struct{} +type UnsignedPayload struct{} // AddUnsignedPayloadMiddleware adds unsignedPayload to the operation // middleware stack func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&unsignedPayload{}, "ResolveEndpointV2", middleware.After) + return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After) } // ID returns the unsignedPayload identifier -func (m *unsignedPayload) ID() string { +func (m *UnsignedPayload) ID() string { return computePayloadHashMiddlewareID } // HandleFinalize sets the payload hash magic value to the unsigned sentinel. -func (m *unsignedPayload) HandleFinalize( +func (m *UnsignedPayload) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, @@ -123,7 +122,7 @@ func (m *unsignedPayload) HandleFinalize( return next.HandleFinalize(ctx, in) } -// computePayloadSHA256 computes SHA256 payload hash to sign. +// ComputePayloadSHA256 computes SHA256 payload hash to sign. // // Will not set the Unsigned Payload magic SHA value, if a SHA has already been // stored in the context. 
(e.g. application pre-computed SHA256 before making @@ -131,12 +130,12 @@ func (m *unsignedPayload) HandleFinalize( // // This middleware does not check the X-Amz-Content-Sha256 header, if that // header is serialized a middleware must translate it into the context. -type computePayloadSHA256 struct{} +type ComputePayloadSHA256 struct{} // AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the // operation middleware stack func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&computePayloadSHA256{}, "ResolveEndpointV2", middleware.After) + return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) } // RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the @@ -147,13 +146,13 @@ func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { } // ID is the middleware name -func (m *computePayloadSHA256) ID() string { +func (m *ComputePayloadSHA256) ID() string { return computePayloadHashMiddlewareID } // HandleFinalize computes the payload hash for the request, storing it to the // context. This is a no-op if a caller has previously set that value. -func (m *computePayloadSHA256) HandleFinalize( +func (m *ComputePayloadSHA256) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, @@ -196,35 +195,35 @@ func (m *computePayloadSHA256) HandleFinalize( // Use this to disable computing the Payload SHA256 checksum and instead use // UNSIGNED-PAYLOAD for the SHA256 value. func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{}) return err } -// contentSHA256Header sets the X-Amz-Content-Sha256 header value to +// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to // the Payload hash stored in the context. -type contentSHA256Header struct{} +type ContentSHA256Header struct{} // AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the // operation middleware stack func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - return stack.Finalize.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) + return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) } // RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware // from the operation middleware stack func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - _, err := stack.Finalize.Remove((*contentSHA256Header)(nil).ID()) + _, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID()) return err } // ID returns the ContentSHA256HeaderMiddleware identifier -func (m *contentSHA256Header) ID() string { +func (m *ContentSHA256Header) ID() string { return "SigV4ContentSHA256Header" } // HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash // stored in the context. 
-func (m *contentSHA256Header) HandleFinalize( +func (m *ContentSHA256Header) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, @@ -301,22 +300,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} } - mctx := metrics.Context(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchStartTime = sdk.NowTime() - } - } - credentials, err := s.credentialsProvider.Retrieve(ctx) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.CredentialFetchEndTime = sdk.NowTime() - } - } - if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} } @@ -337,20 +321,7 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl }) } - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignStartTime = sdk.NowTime() - } - } - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) - - if mctx != nil { - if attempt, err := mctx.Data().LatestAttempt(); err == nil { - attempt.SignEndTime = sdk.NowTime() - } - } - if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} } @@ -360,18 +331,21 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl return next.HandleFinalize(ctx, in) } -type streamingEventsPayload struct{} +// StreamingEventsPayload signs input event stream messages. +type StreamingEventsPayload struct{} // AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. func AddStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Finalize.Add(&streamingEventsPayload{}, middleware.Before) + return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before) } -func (s *streamingEventsPayload) ID() string { +// ID identifies the middleware. +func (s *StreamingEventsPayload) ID() string { return computePayloadHashMiddlewareID } -func (s *streamingEventsPayload) HandleFinalize( +// HandleFinalize marks the input stream to be signed with SigV4. +func (s *StreamingEventsPayload) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go index bb61904e1d80..dcd896a9bf6f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -1,48 +1,41 @@ -// Package v4 implements signing for AWS V4 signer +// Package v4 implements the AWS signature version 4 algorithm (commonly known +// as SigV4). // -// Provides request signing for request that need to be signed with -// AWS V4 Signatures. +// For more information about SigV4, see [Signing AWS API requests] in the IAM +// user guide. // -// # Standalone Signer +// While this implementation CAN work in an external context, it is developed +// primarily for SDK use and you may encounter fringe behaviors around header +// canonicalization. 
// -// Generally using the signer outside of the SDK should not require any additional +// # Pre-escaping a request URI // -// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires +// AWS v4 signature validation requires that the canonical string's URI path +// component must be the escaped form of the HTTP request's path. +// +// The Go HTTP client will perform escaping automatically on the HTTP request. +// This may cause signature validation errors because the request differs from +// the URI path or query from which the signature was generated. // -// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent -// to the service as. +// Because of this, we recommend that you explicitly escape the request when +// using this signer outside of the SDK to prevent possible signature mismatch. +// This can be done by setting URL.Opaque on the request. The signer will +// prefer that value, falling back to the return of URL.EscapedPath if unset. // -// The signer will first check the URL.Opaque field, and use its value if set. -// The signer does require the URL.Opaque field to be set in the form of: +// When setting URL.Opaque you must do so in the form of: // // "///" // // // e.g. // "//example.com/some/path" // -// The leading "//" and hostname are required or the URL.Opaque escaping will -// not work correctly. -// -// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() -// method and using the returned value. -// -// AWS v4 signature validation requires that the canonical string's URI path -// element must be the URI escaped form of the HTTP request's path. -// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -// -// The Go HTTP client will perform escaping automatically on the request. Some -// of these escaping may cause signature validation errors because the HTTP -// request differs from the URI path or query that the signature was generated. -// https://golang.org/pkg/net/url/#URL.EscapedPath +// The leading "//" and hostname are required or the escaping will not work +// correctly. // -// Because of this, it is recommended that when using the signer outside of the -// SDK that explicitly escaping the request prior to being signed is preferable, -// and will help prevent signature validation errors. This can be done by setting -// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then -// call URL.EscapedPath() if Opaque is not set. +// The TestStandaloneSign unit test provides a complete example of using the +// signer outside of the SDK and pre-escaping the URI path. // -// Test `TestStandaloneSign` provides a complete example of using the signer -// outside of the SDK and pre-escaping the URI path. 
+// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html package v4 import ( @@ -402,6 +395,12 @@ func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) query := url.Values{} unsignedHeaders := http.Header{} for k, h := range header { + // literally just this header has this constraint for some stupid reason, + // see #2508 + if k == "X-Amz-Expected-Bucket-Owner" { + k = "x-amz-expected-bucket-owner" + } + if r.IsValid(k) { query[k] = h } else { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go index 8fd14cecd231..a1ad20fe3418 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go @@ -12,18 +12,20 @@ import ( func AddResponseErrorMiddleware(stack *middleware.Stack) error { // add error wrapper middleware before request id retriever middleware so that it can wrap the error response // returned by operation deserializers - return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) } -type responseErrorWrapper struct { +// ResponseErrorWrapper wraps operation errors with ResponseError. +type ResponseErrorWrapper struct { } // ID returns the middleware identifier -func (m *responseErrorWrapper) ID() string { +func (m *ResponseErrorWrapper) ID() string { return "ResponseErrorWrapper" } -func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +// HandleDeserialize wraps the stack error with smithyhttp.ResponseError. 
+func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 31feb8b1c4d6..6977bdb781f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,118 @@ +# v1.27.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.17 (2024-06-03) + +* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.11 (2024-04-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.10 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.9 (2024-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.8 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.7 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.6 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.5 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.4 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.3 (2024-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.1 (2024-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.26.6 (2024-01-22) * **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index 50582d89d54b..d5226cb04374 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -80,6 +80,9 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile resolveRequestMinCompressSizeBytes, + + // Sets the AccountIDEndpointMode if present in env var or shared config profile + resolveAccountIDEndpointMode, } // A Config represents a generic configuration value or set of values. This type diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go index 88550198ccee..3a06f1412a7c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -80,6 +80,9 @@ const ( awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES" awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH" + + awsAccountIDEnv = "AWS_ACCOUNT_ID" + awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE" ) var ( @@ -290,6 +293,9 @@ type EnvConfig struct { // will only bypass the modified endpoint routing and signing behaviors // associated with the feature. S3DisableExpressAuth *bool + + // Indicates whether account ID will be required/ignored in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode } // loadEnvConfig reads configuration values from the OS's environment variables. 
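Note: the `env_config.go` hunks above introduce two new environment variables, `AWS_ACCOUNT_ID` and `AWS_ACCOUNT_ID_ENDPOINT_MODE`. A small sketch of the resulting behavior, based on the resolution logic in this patch; the values shown are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Placeholder values for illustration only.
	// AWS_ACCOUNT_ID is attached to environment credentials (Credentials.AccountID)
	// when access keys are also present in the environment.
	os.Setenv("AWS_ACCOUNT_ID", "123456789012")
	// Accepted values: preferred, required, disabled.
	os.Setenv("AWS_ACCOUNT_ID_ENDPOINT_MODE", "required")

	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	// Resolved by resolveAccountIDEndpointMode; falls back to "preferred" when unset.
	fmt.Println(cfg.AccountIDEndpointMode)
}
```
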
@@ -309,6 +315,7 @@ func NewEnvConfig() (EnvConfig, error) { setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys) setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys) if creds.HasKeys() { + creds.AccountID = os.Getenv(awsAccountIDEnv) creds.SessionToken = os.Getenv(awsSessionTokenEnvVar) cfg.Credentials = creds } @@ -389,6 +396,10 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } + if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil { + return cfg, err + } + return cfg, nil } @@ -417,6 +428,10 @@ func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, return *c.RequestMinCompressSizeBytes, true, nil } +func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, // and not 0. func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { @@ -491,6 +506,28 @@ func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error { return nil } +func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch value { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value) + } + break + } + return nil +} + // GetRegion returns the AWS Region if set in the environment. Returns an empty // string if not set. func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 51f6e332018c..a8fe2a2bbed8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.6" +const goModuleVersion = "1.27.27" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go index 06596c1b7c8c..5f643977b009 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -215,6 +215,8 @@ type LoadOptions struct { // Whether S3 Express auth is disabled. S3DisableExpressAuth *bool + + AccountIDEndpointMode aws.AccountIDEndpointMode } func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) { @@ -278,6 +280,10 @@ func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, return *o.RequestMinCompressSizeBytes, true, nil } +func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil +} + // WithRegion is a helper function to construct functional options // that sets Region on config's LoadOptions. Setting the region to // an empty string, will result in the region value being ignored. 
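Note: the same setting can also be supplied programmatically through the `WithAccountIDEndpointMode` helper added in the next hunk. A brief sketch under that assumption (not part of the patch):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// Programmatic equivalent of AWS_ACCOUNT_ID_ENDPOINT_MODE=disabled.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithAccountIDEndpointMode(aws.AccountIDEndpointModeDisabled),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg
}
```
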
@@ -323,6 +329,17 @@ func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOpt } } +// WithAccountIDEndpointMode is a helper function to construct functional options +// that sets AccountIDEndpointMode on config's LoadOptions +func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc { + return func(o *LoadOptions) error { + if m != "" { + o.AccountIDEndpointMode = m + } + return nil + } +} + // getDefaultRegion returns DefaultRegion from config's LoadOptions func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) { if len(o.DefaultRegion) == 0 { @@ -824,7 +841,14 @@ func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResol // the EndpointResolver value is ignored. If multiple WithEndpointResolver calls // are made, the last call overrides the previous call values. // -// Deprecated: See WithEndpointResolverWithOptions +// Deprecated: The global endpoint resolution interface is deprecated. The API +// for endpoint resolution is now unique to each service and is set via the +// EndpointResolverV2 field on service client options. Use of +// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you +// from using any endpoint-related service features released after the +// introduction of EndpointResolverV2. You may also encounter broken or +// unexpected behavior when using the old global interface with services that +// use many endpoint-related customizations such as S3. func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc { return func(o *LoadOptions) error { o.EndpointResolver = v @@ -844,6 +868,9 @@ func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.En // that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil, // the EndpointResolver value is ignored. If multiple WithEndpointResolver calls // are made, the last call overrides the previous call values. +// +// Deprecated: The global endpoint resolution interface is deprecated. See +// deprecation docs on [WithEndpointResolver]. 
func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc { return func(o *LoadOptions) error { o.EndpointResolverWithOptions = v diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index 13745fc98fd9..043781f1f772 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -225,6 +225,23 @@ func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value return } +// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode +type accountIDEndpointModeProvider interface { + getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) +} + +func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(accountIDEndpointModeProvider); ok { + value, found, err = p.getAccountIDEndpointMode(ctx) + if err != nil || found { + break + } + } + } + return +} + // ec2IMDSRegionProvider provides access to the ec2 imds region // configuration value type ec2IMDSRegionProvider interface { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index fde2e3980e0f..41009c7da06c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -166,6 +166,22 @@ func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, co return nil } +// resolveAccountIDEndpointMode extracts the AccountIDEndpointMode from the configs slice's +// SharedConfig or EnvConfig +func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error { + m, found, err := getAccountIDEndpointMode(ctx, configs) + if err != nil { + return err + } + + if !found { + m = aws.AccountIDEndpointModePreferred + } + + cfg.AccountIDEndpointMode = m + return nil +} + // resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default // region if region had not been resolved from other sources. func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index c546cb7d0f59..d7a2b5307eab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -115,6 +115,9 @@ const ( requestMinCompressionSizeBytes = "request_min_compression_size_bytes" s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth" + + accountIDKey = "aws_account_id" + accountIDEndpointMode = "account_id_endpoint_mode" ) // defaultSharedConfigProfile allows for swapping the default profile for testing @@ -341,6 +344,8 @@ type SharedConfig struct { // will only bypass the modified endpoint routing and signing behaviors // associated with the feature. 
S3DisableExpressAuth *bool + + AccountIDEndpointMode aws.AccountIDEndpointMode } func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { @@ -1124,12 +1129,17 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err) } + if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil { + return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err) + } + // Shared Credentials creds := aws.Credentials{ AccessKeyID: section.String(accessKeyIDKey), SecretAccessKey: section.String(secretAccessKey), SessionToken: section.String(sessionTokenKey), Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]), + AccountID: section.String(accountIDKey), } if creds.HasKeys() { @@ -1177,6 +1187,26 @@ func updateDisableRequestCompression(disable **bool, sec ini.Section, key string return nil } +func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error { + if !sec.Has(key) { + return nil + } + + v := sec.String(key) + switch v { + case "preferred": + *m = aws.AccountIDEndpointModePreferred + case "required": + *m = aws.AccountIDEndpointModeRequired + case "disabled": + *m = aws.AccountIDEndpointModeDisabled + default: + return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v) + } + + return nil +} + func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) { if c.RequestMinCompressSizeBytes == nil { return 0, false, nil @@ -1191,6 +1221,10 @@ func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, b return *c.DisableRequestCompression, true, nil } +func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) { + return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil +} + func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error { if !section.Has(key) { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 989c4ea39bee..66b7868161de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,117 @@ +# v1.17.27 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.26 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.25 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.24 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.23 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.22 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.21 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.20 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.19 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-06-03) + +* **Dependency Update**: Updated 
to the latest SDK module versions + +# v1.17.16 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2024-04-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2024-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2024-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2024-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.16 (2024-01-18) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go index 9a869f895476..dc291c97cd7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go @@ -128,6 +128,7 @@ type GetCredentialsOutput struct { AccessKeyID string SecretAccessKey string Token string + AccountID string } // EndpointError is an error returned from the endpoint service diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go index 0c3c4d68266c..2386153a9ec0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -152,6 +152,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { SecretAccessKey: resp.SecretAccessKey, SessionToken: resp.Token, Source: ProviderName, + AccountID: resp.AccountID, } if resp.Expiration != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index fe92184dc527..aaed530a2613 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module 
-const goModuleVersion = "1.16.16" +const goModuleVersion = "1.17.27" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go index fe9345e287c3..911fcc327294 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go @@ -167,6 +167,9 @@ type CredentialProcessResponse struct { // The date on which the current credentials expire. Expiration *time.Time + + // The ID of the account for credentials + AccountID string `json:"AccountId"` } // Retrieve executes the credential process command and returns the @@ -208,6 +211,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { AccessKeyID: resp.AccessKeyID, SecretAccessKey: resp.SecretAccessKey, SessionToken: resp.SessionToken, + AccountID: resp.AccountID, } // Handle expiration diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go index b3cf7853e764..8c230be8eb8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go @@ -129,6 +129,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { CanExpire: true, Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), Source: ProviderName, + AccountID: p.options.AccountID, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go index 289707b6de44..4c7f7993f544 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go @@ -308,6 +308,11 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err return aws.Credentials{Source: ProviderName}, err } + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + return aws.Credentials{ AccessKeyID: *resp.Credentials.AccessKeyId, SecretAccessKey: *resp.Credentials.SecretAccessKey, @@ -316,5 +321,6 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err CanExpire: true, Expires: *resp.Credentials.Expiration, + AccountID: accountID, }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go index ddaf6df6ce11..b4b719708620 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -135,6 +136,11 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err) } + var accountID string + if resp.AssumedRoleUser != nil { + accountID = getAccountID(resp.AssumedRoleUser) + } + // InvalidIdentityToken error is a temporary error that can occur // when assuming an Role with a JWT web identity token. 
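Note: the `AccountID` now propagated by the STS credential providers is derived from `AssumedRoleUser.Arn` via the `getAccountID` helper defined a little further down in this diff. A standalone sketch of the same parsing; the ARN below is a made-up example:

```go
package main

import (
	"fmt"
	"strings"
)

// accountIDFromARN mirrors the getAccountID helper introduced in this patch:
// an ARN has the form "arn:partition:service:region:account-id:resource",
// so the account ID is the fifth colon-separated field.
func accountIDFromARN(arn string) string {
	parts := strings.Split(arn, ":")
	if len(parts) < 5 {
		return ""
	}
	return parts[4]
}

func main() {
	// Hypothetical assumed-role ARN, for illustration only.
	arn := "arn:aws:sts::123456789012:assumed-role/example-role/session"
	fmt.Println(accountIDFromARN(arn)) // prints "123456789012"
}
```
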
@@ -145,6 +151,19 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials Source: WebIdentityProviderName, CanExpire: true, Expires: *resp.Credentials.Expiration, + AccountID: accountID, } return value, nil } + +// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]" +func getAccountID(u *types.AssumedRoleUser) string { + if u.Arn == nil { + return "" + } + parts := strings.Split(*u.Arn, ":") + if len(parts) < 5 { + return "" + } + return parts[4] +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 40c317a967bf..3584159df8ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,73 @@ +# v1.16.11 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2024-03-21) + +* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls. + +# v1.15.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.14.11 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go index 46e144d93636..3f4a10e2c164 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -185,6 +185,10 @@ type Options struct { // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html EnableFallback aws.Ternary + // By default, all IMDS client operations enforce a 5-second timeout. You + // can disable that behavior with this setting. + DisableDefaultTimeout bool + // provides the caching of API tokens used for operation calls. 
If unset, // the API token will not be retrieved for the operation. tokenProvider *tokenProvider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go index bacdb5d21f26..d5765c36b170 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go @@ -3,8 +3,9 @@ // // All Client operation calls have a default timeout. If the operation is not // completed before this timeout expires, the operation will be canceled. This -// timeout can be overridden by providing Context with a timeout or deadline -// with calling the client's operations. +// timeout can be overridden through the following: +// - Set the options flag DisableDefaultTimeout +// - Provide a Context with a timeout or deadline with calling the client's operations. // // See the EC2 IMDS user guide for more information on using the API. // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 0d747b213f19..1cd7560633ac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.11" +const goModuleVersion = "1.16.11" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go index fc948c27d89d..90cf4aeb3df3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go @@ -56,6 +56,7 @@ func addRequestMiddleware(stack *middleware.Stack, // Operation timeout err = stack.Initialize.Add(&operationTimeout{ + Disabled: options.DisableDefaultTimeout, DefaultTimeout: defaultOperationTimeout, }, middleware.Before) if err != nil { @@ -260,6 +261,7 @@ const ( // Otherwise the timeout cleanup will race the resource being consumed // upstream. 
type operationTimeout struct { + Disabled bool DefaultTimeout time.Duration } @@ -270,6 +272,10 @@ func (m *operationTimeout) HandleInitialize( ) ( output middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { + if m.Disabled { + return next.HandleInitialize(ctx, input) + } + if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 { var cancelFn func() ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md index fe53e9a5c8d3..1a1644214014 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md @@ -1,3 +1,146 @@ +# v1.17.8 (2024-07-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2024-07-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2024-07-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2024-06-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.25 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.24 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.23 (2024-06-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.22 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.21 (2024-05-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.20 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.19 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.18 (2024-05-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.17 (2024-05-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2024-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2024-04-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.14 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2024-03-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2024-03-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2024-03-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2024-02-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2024-02-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2024-02-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2024-02-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.15.15 (2024-01-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go index 06070adadded..5a9fe2dd34c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go @@ -183,7 +183,10 @@ func (d Downloader) Download(ctx context.Context, w io.WriterAt, input *s3.GetOb // Copy ClientOptions clientOptions := make([]func(*s3.Options), 0, len(impl.cfg.ClientOptions)+1) clientOptions = append(clientOptions, func(o *s3.Options) { - o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey)) + o.APIOptions = append(o.APIOptions, + middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), + addFeatureUserAgent, // yes, there are two of these + ) }) clientOptions = append(clientOptions, impl.cfg.ClientOptions...) impl.cfg.ClientOptions = clientOptions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go index 6fe1aa681d07..3c91daf67cac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go @@ -3,4 +3,4 @@ package manager // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.15" +const goModuleVersion = "1.17.8" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go index d1be506e06f5..d73a6d3b9135 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go @@ -311,6 +311,7 @@ func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ... 
clientOptions = append(clientOptions, func(o *s3.Options) { o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey), + addFeatureUserAgent, // yes, there are two of these func(s *smithymiddleware.Stack) error { return s.Finalize.Insert(&setS3ExpressDefaultChecksum{}, "ResolveEndpointV2", smithymiddleware.After) }, @@ -853,3 +854,31 @@ func (*setS3ExpressDefaultChecksum) HandleFinalize( return next.HandleFinalize(ctx, in) } + +func addFeatureUserAgent(stack *smithymiddleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(middleware.UserAgentFeatureS3Transfer) + return nil +} + +func getOrAddRequestUserAgent(stack *smithymiddleware.Stack) (*middleware.RequestUserAgent, error) { + id := (*middleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = middleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, smithymiddleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*middleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go index 0c5a2d40c9f3..24db8e144cba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go @@ -5,6 +5,7 @@ import ( "fmt" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go" "github.com/aws/smithy-go/auth" @@ -39,7 +40,10 @@ func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request } hash := v4.GetPayloadHash(ctx) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + signingTime := sdk.NowTime() + skew := internalcontext.GetAttemptSkewContext(ctx) + signingTime = signingTime.Add(skew) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o *v4.SignerOptions) { o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) o.Logger = v.Logger diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go deleted file mode 100644 index 58ef438a1964..000000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/path_value.go +++ /dev/null @@ -1,225 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. 
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.EqualFold(name, c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, valItem := range values { - value := reflect.Indirect(valItem) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - if dstVal.Kind() == reflect.String { - dstVal.SetString(srcVal.String()) - } else { - dstVal.Set(srcVal) - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index dc87ec41027c..3c1d846e03d7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,68 @@ +# v1.3.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our 
language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.2.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 41ee0bfbe3e5..7926a49c2443 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.2.10" +const goModuleVersion = "1.3.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go index 15bf104772f9..f0c283d3942b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go @@ -2,12 +2,14 @@ package context import ( "context" + "time" "github.com/aws/smithy-go/middleware" ) type s3BackendKey struct{} type checksumInputAlgorithmKey struct{} +type clockSkew struct{} const ( // S3BackendS3Express identifies the S3Express backend @@ -37,3 +39,14 @@ func GetChecksumInputAlgorithm(ctx context.Context) string { v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string) return v } + +// SetAttemptSkewContext sets the clock skew value on the context +func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context { + return middleware.WithStackValue(ctx, clockSkew{}, v) +} + +// GetAttemptSkewContext gets the clock skew value from the context +func GetAttemptSkewContext(ctx context.Context) time.Duration { + x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration) + return x +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go index ba6032758a57..91414afe81c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go @@ -12,11 +12,12 @@ type Partition struct { // PartitionConfig provides the endpoint metadata for an AWS region or partition. 
type PartitionConfig struct { - Name string `json:"name"` - DnsSuffix string `json:"dnsSuffix"` - DualStackDnsSuffix string `json:"dualStackDnsSuffix"` - SupportsFIPS bool `json:"supportsFIPS"` - SupportsDualStack bool `json:"supportsDualStack"` + Name string `json:"name"` + DnsSuffix string `json:"dnsSuffix"` + DualStackDnsSuffix string `json:"dualStackDnsSuffix"` + SupportsFIPS bool `json:"supportsFIPS"` + SupportsDualStack bool `json:"supportsDualStack"` + ImplicitGlobalRegion string `json:"implicitGlobalRegion"` } type RegionOverrides struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go index 849beffd7da0..5f0779997deb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -13,11 +13,12 @@ var partitions = []Partition{ ID: "aws", RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-east-1", }, Regions: map[string]RegionOverrides{ "af-south-1": { @@ -111,6 +112,13 @@ var partitions = []Partition{ SupportsFIPS: nil, SupportsDualStack: nil, }, + "ca-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "eu-central-1": { Name: nil, DnsSuffix: nil, @@ -229,11 +237,12 @@ var partitions = []Partition{ ID: "aws-cn", RegionRegex: "^cn\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-cn", - DnsSuffix: "amazonaws.com.cn", - DualStackDnsSuffix: "api.amazonwebservices.com.cn", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws-cn", + DnsSuffix: "amazonaws.com.cn", + DualStackDnsSuffix: "api.amazonwebservices.com.cn", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "cn-northwest-1", }, Regions: map[string]RegionOverrides{ "aws-cn-global": { @@ -263,11 +272,12 @@ var partitions = []Partition{ ID: "aws-us-gov", RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", - SupportsFIPS: true, - SupportsDualStack: true, + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", }, Regions: map[string]RegionOverrides{ "aws-us-gov-global": { @@ -297,11 +307,12 @@ var partitions = []Partition{ ID: "aws-iso", RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso", - DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso", + DnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "c2s.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-iso-east-1", }, Regions: map[string]RegionOverrides{ "aws-iso-global": { @@ -331,11 +342,12 @@ var partitions = []Partition{ ID: "aws-iso-b", RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-b", - DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", - SupportsFIPS: true, - SupportsDualStack: false, + 
Name: "aws-iso-b", + DnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "sc2s.sgov.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-isob-east-1", }, Regions: map[string]RegionOverrides{ "aws-iso-b-global": { @@ -358,23 +370,33 @@ var partitions = []Partition{ ID: "aws-iso-e", RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-e", - DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-e", + DnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "cloud.adc-e.uk", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "eu-isoe-west-1", + }, + Regions: map[string]RegionOverrides{ + "eu-isoe-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, }, - Regions: map[string]RegionOverrides{}, }, { ID: "aws-iso-f", RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-iso-f", - DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", - SupportsFIPS: true, - SupportsDualStack: false, + Name: "aws-iso-f", + DnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "csp.hci.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + ImplicitGlobalRegion: "us-isof-south-1", }, Regions: map[string]RegionOverrides{}, }, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index f376f6908aaf..7a28569c3dec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -198,7 +198,11 @@ "supportsFIPS" : true }, "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", - "regions" : { } + "regions" : { + "eu-isoe-west-1" : { + "description" : "EU ISOE West" + } + } }, { "id" : "aws-iso-f", "outputs" : { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index e0265474c4fa..549df6013c6b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,69 @@ +# v2.6.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.3 
(2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v2.5.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index bec2c6a1e9c0..dcb5a4b93be1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.5.10" +const goModuleVersion = "2.6.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 9a0b5e037766..c0e54faff28a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.8.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. + # v1.7.3 (2024-01-22) * **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 1d68f8346d44..6e0b906c3460 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.7.3" +const goModuleVersion = "1.8.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go new file mode 100644 index 000000000000..8e24a3f0a470 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go @@ -0,0 +1,42 @@ +package middleware + +import ( + "context" + "sync/atomic" + "time" + + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go/middleware" +) + +// AddTimeOffsetMiddleware sets a value representing clock skew on the request context. +// This can be read by other operations (such as signing) to correct the date value they send +// on the request +type AddTimeOffsetMiddleware struct { + Offset *atomic.Int64 +} + +// ID the identifier for AddTimeOffsetMiddleware +func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" } + +// HandleBuild sets a value for attemptSkew on the request context if one is set on the client. 
+func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + if m.Offset != nil { + offset := time.Duration(m.Offset.Load()) + ctx = internalcontext.SetAttemptSkewContext(ctx, offset) + } + return next.HandleBuild(ctx, in) +} + +// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer +// held by AddTimeOffsetMiddleware +func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 { + m.Offset.Store(v.Nanoseconds()) + } + return next.HandleDeserialize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md index 8aa94972d34a..2bf7d32f4bdd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md @@ -1,3 +1,69 @@ +# v1.3.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions + # v1.2.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go index 2a5888c1b95d..6fe0cd600f2e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go @@ -3,4 +3,4 @@ package v4a // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.2.10" +const goModuleVersion = "1.3.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go index 516d459d5dce..af4f6abcfa73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go @@ -5,6 +5,8 @@ import ( "fmt" "time" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go" @@ -72,7 +74,11 @@ func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, } hash := v4.GetPayloadHash(ctx) - err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, sdk.NowTime(), func(o *SignerOptions) { + signingTime := sdk.NowTime() + if skew := internalcontext.GetAttemptSkewContext(ctx); skew != 0 { + signingTime.Add(skew) + } + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, signingTime, func(o *SignerOptions) { o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) o.Logger = v.Logger diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index c3525fd22983..2c9b1d6d4f3a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,19 @@ +# v1.11.3 (2024-06-28) + +* No change notes available for this release. + +# v1.11.2 (2024-03-29) + +* No change notes available for this release. + +# v1.11.1 (2024-02-21) + +* No change notes available for this release. + +# v1.11.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. + # v1.10.4 (2023-12-07) * No change notes available for this release. 
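The clock-skew plumbing introduced above (the new internal/middleware.AddTimeOffsetMiddleware, the SetAttemptSkewContext/GetAttemptSkewContext helpers, and the adjusted signing time in the v4 and v4a signer adapters) is easier to follow end to end as a small sketch. The snippet below is illustrative only: the SDK stores the skew with smithy middleware stack values and keeps these helpers in internal packages that application code cannot import, so the context key and accessors here are inlined stand-ins, and the 90-second offset is an assumed value.

    package main

    import (
        "context"
        "fmt"
        "sync/atomic"
        "time"
    )

    // clockSkewKey stands in for the unexported clockSkew{} key that
    // internal/context now uses to carry the per-attempt skew.
    type clockSkewKey struct{}

    func setAttemptSkew(ctx context.Context, d time.Duration) context.Context {
        return context.WithValue(ctx, clockSkewKey{}, d)
    }

    func getAttemptSkew(ctx context.Context) time.Duration {
        d, _ := ctx.Value(clockSkewKey{}).(time.Duration)
        return d
    }

    func main() {
        // Plays the role of the client's shared timeOffset field: the measured
        // difference between server-reported time and the local clock, written
        // after a response and read before each attempt.
        offset := new(atomic.Int64)
        offset.Store(int64(90 * time.Second)) // assumed skew: local clock 90s behind

        // What AddTimeOffsetMiddleware.HandleBuild does: copy the stored offset
        // onto the attempt's context so later middleware can see it.
        ctx := setAttemptSkew(context.Background(), time.Duration(offset.Load()))

        // What the v4 and v4a signer adapters now do: shift the signing time by
        // the skew instead of trusting the local clock alone.
        signingTime := time.Now()
        if skew := getAttemptSkew(ctx); skew != 0 {
            signingTime = signingTime.Add(skew)
        }
        fmt.Println("corrected signing time:", signingTime.UTC().Format(time.RFC3339))
    }
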
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index cc6384005aac..b59fb2afcc06 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.10.4" +const goModuleVersion = "1.11.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md index 8f9740361746..4c8a8759f6b9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -1,3 +1,77 @@ +# v1.3.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.5 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions + # v1.2.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go index a88534d2a733..c144a69a2ff5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go @@ -3,4 +3,4 @@ package checksum // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.2.10" +const goModuleVersion = "1.3.17" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index a65890b58f35..c03183e1c539 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,77 @@ +# v1.11.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.7 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.6 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.5 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.4 (2024-03-05) + +* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility. + +# v1.11.3 (2024-03-04) + +* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API. + +# v1.11.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. 
+* **Dependency Update**: Updated to the latest SDK module versions + # v1.10.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go index cc919701a06f..5d5286f92cc8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go @@ -27,13 +27,21 @@ func GetIsPresigning(ctx context.Context) bool { type isPresigningKey struct{} -// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that +// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that // will update the stack's context to be flagged as being invoked for the // purpose of presigning. -func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { +func AddAsIsPresigningMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before) } +// AddAsIsPresigingMiddleware is an alias for backwards compatibility. +// +// Deprecated: This API was released with a typo. Use +// [AddAsIsPresigningMiddleware] instead. +func AddAsIsPresigingMiddleware(stack *middleware.Stack) error { + return AddAsIsPresigningMiddleware(stack) +} + type asIsPresigningMiddleware struct{} func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 073e8866b75d..a21b047962d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.10.10" +const goModuleVersion = "1.11.17" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md index c4df2176519b..bf568e82599f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md @@ -1,3 +1,68 @@ +# v1.17.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + 
+# v1.17.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2024-03-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2024-02-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.10 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go index 986affe186a7..04e649691ddf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go @@ -3,4 +3,4 @@ package s3shared // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.10" +const goModuleVersion = "1.17.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md index 89a5a0d5259e..5a2eee1a866e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -1,3 +1,131 @@ +# v1.58.2 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.1 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.0 (2024-07-02) + +* **Feature**: Added response overrides to Head Object requests. + +# v1.57.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.57.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.56.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.56.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.2 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.1 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.0 (2024-06-05) + +* **Feature**: Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. These changes will not change any of the existing S3 API functionality. +* **Bug Fix**: Add S3-specific smithy protocol tests. + +# v1.54.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.3 (2024-05-23) + +* **Bug Fix**: Prevent parsing failures for nonstandard `Expires` values in responses. If the SDK cannot parse the value set in the response header for this field it will now be returned as `nil`. A new field, `ExpiresString`, has been added that will retain the unparsed value from the response (regardless of whether it came back in a format recognized by the SDK). 
+ +# v1.54.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.0 (2024-05-14) + +* **Feature**: Updated a few x-id in the http uri traits + +# v1.53.2 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.53.1 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.0 (2024-03-18) + +* **Feature**: Fix two issues with response root node names. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.52.1 (2024-03-15) + +* **Documentation**: Documentation updates for Amazon S3. + +# v1.52.0 (2024-03-13) + +* **Feature**: This release makes the default option for S3 on Outposts request signing to use the SigV4A algorithm when using AWS Common Runtime (CRT). + +# v1.51.4 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.3 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.2 (2024-03-04) + +* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.51.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.50.3 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.50.2 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.50.1 (2024-02-19) + +* **Bug Fix**: Prevent potential panic caused by invalid comparison of credentials. + +# v1.50.0 (2024-02-16) + +* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters. + +# v1.49.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.48.1 (2024-01-24) * No change notes available for this release. 
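Of the changelog entries above, the v1.54.3 note on nonstandard Expires values is the one most likely to need a code-level adjustment in callers. Below is a minimal sketch of reading the new ExpiresString field alongside the parsed Expires on a HeadObject response; the bucket and key are placeholders and error handling is kept to the bare minimum.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)

        // Placeholder bucket and key, purely for illustration.
        out, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
            Bucket: aws.String("example-bucket"),
            Key:    aws.String("example-key"),
        })
        if err != nil {
            log.Fatal(err)
        }

        // Expires is populated only when the response header parses as a valid
        // timestamp; ExpiresString always carries the raw header value.
        if out.Expires != nil {
            fmt.Println("parsed Expires:", out.Expires)
        }
        if out.ExpiresString != nil {
            fmt.Println("raw Expires header:", *out.ExpiresString)
        }
    }
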
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go index db35814d356a..97730b2cc666 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -14,6 +14,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" "github.com/aws/aws-sdk-go-v2/internal/v4a" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -22,12 +23,14 @@ import ( s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -38,6 +41,9 @@ const ServiceAPIVersion = "2006-03-01" // Storage Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -82,6 +88,8 @@ func New(options Options, optFns ...func(*Options)) *Client { finalizeExpressCredentials(&options, client) + initializeTimeOffsetResolver(client) + return client } @@ -259,15 +267,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -394,17 +403,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { } func addClientUserAgent(stack *middleware.Stack, options Options) error { - if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion)(stack); err != nil { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { return err } + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion) if len(options.AppID) > 0 { - return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) } return nil } +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -424,12 +453,72 @@ func newDefaultV4Signer(o Options) *v4.Signer { }) } -func addRetryMiddlewares(stack *middleware.Stack, o Options) error { - mo := retry.AddRetryMiddlewaresOptions{ - Retryer: o.Retryer, - LogRetryAttempts: o.ClientLogMode.IsRetries(), +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return 
stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + }) + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err } - return retry.AddRetryMiddlewares(stack, mo) + return nil } // resolves UseARNRegion S3 configuration @@ -492,6 +581,18 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + type httpSignerV4a interface { SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, service string, regionSet []string, signingTime time.Time, @@ -512,6 +613,51 @@ func newDefaultV4aSigner(o Options) *v4a.Signer { }) } +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + func addMetadataRetrieverMiddleware(stack *middleware.Stack) error { return 
s3shared.AddMetadataRetrieverMiddleware(stack) } @@ -520,6 +666,10 @@ func add100Continue(stack *middleware.Stack, options Options) error { return s3shared.Add100Continue(stack, options.ContinueHeaderThresholdBytes) } +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + // ComputedInputChecksumsMetadata provides information about the algorithms used // to compute the checksum(s) of the input payload. type ComputedInputChecksumsMetadata struct { @@ -792,7 +942,7 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op if err != nil { return err } - err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + err = presignedurlcust.AddAsIsPresigningMiddleware(stack) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go index d1e7dceaee23..00e395efb62a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -18,22 +18,27 @@ import ( // by any previously uploaded parts will be freed. However, if any part uploads are // currently in progress, those part uploads might or might not succeed. As a // result, it might be necessary to abort a given multipart upload multiple times -// in order to completely free all storage consumed by all parts. To verify that -// all parts have been removed and prevent getting charged for the part storage, -// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// API operation and ensure that the parts list is empty. Directory buckets - For -// directory buckets, you must make requests for this API operation to the Zonal -// endpoint. These endpoints support virtual-hosted-style requests in the format +// in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed and prevent getting charged for the +// part storage, you should call the [ListParts]API operation and ensure that the parts list +// is empty. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions +// // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. 
Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -41,17 +46,31 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following operations are related to AbortMultipartUpload : +// +// [CreateMultipartUpload] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to AbortMultipartUpload : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { if params == nil { params = &AbortMultipartUploadInput{} @@ -69,31 +88,39 @@ func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipar type AbortMultipartUploadInput struct { - // The bucket name to which the upload was taking place. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format + // The bucket name to which the upload was taking place. 
+ // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
+ // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -117,16 +144,19 @@ type AbortMultipartUploadInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -135,7 +165,9 @@ func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { type AbortMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
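For reference, a minimal caller-side sketch of the abort-then-verify flow that the reworked AbortMultipartUpload documentation above recommends (abort the upload, then call ListParts to confirm the parts list is empty). This is an illustration only, not part of the vendored change; it assumes a client configured from the default credential chain, and the bucket, key, and upload ID below are placeholder values.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// abortAndVerify aborts a multipart upload and then checks that no parts
// remain, so no storage is still being billed for orphaned parts.
func abortAndVerify(ctx context.Context, client *s3.Client, bucket, key, uploadID string) error {
	// Abort the upload. Part uploads that are still in progress may complete
	// afterwards, so an abort may need to be repeated.
	if _, err := client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	}); err != nil {
		return err
	}

	// Verify that the parts list is empty.
	out, err := client.ListParts(ctx, &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		var nsu *types.NoSuchUpload
		if errors.As(err, &nsu) {
			return nil // the upload and its parts are already gone
		}
		return err
	}
	if n := len(out.Parts); n > 0 {
		return fmt.Errorf("multipart upload %s still has %d parts", uploadID, n)
	}
	return nil
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	// Placeholder identifiers for illustration.
	if err := abortAndVerify(context.TODO(), client, "example-bucket", "example-key", "example-upload-id"); err != nil {
		log.Fatal(err)
	}
}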
@@ -166,25 +198,25 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -202,6 +234,15 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -211,7 +252,7 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go index b9f094f1e102..29c7e3ea8e91 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -13,51 +13,59 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Completes a multipart upload by assembling previously uploaded parts. You first -// initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. After successfully uploading all relevant parts of an upload, you -// call this CompleteMultipartUpload operation to complete the upload. Upon -// receiving this request, Amazon S3 concatenates all the parts in ascending order -// by part number to create a new object. In the CompleteMultipartUpload request, -// you must provide the parts list and ensure that the parts list is complete. The -// CompleteMultipartUpload API operation concatenates the parts that you provide in -// the list. For each part in the list, you must provide the PartNumber value and -// the ETag value that are returned after that part was uploaded. 
The processing -// of a CompleteMultipartUpload request could take several minutes to finalize. -// After Amazon S3 begins processing the request, it sends an HTTP response header -// that specifies a 200 OK response. While processing is in progress, Amazon S3 -// periodically sends white space characters to keep the connection from timing -// out. A request could fail after the initial 200 OK response has been sent. This -// means that a 200 OK response can contain either a success or an error. The -// error response might be embedded in the 200 OK response. If you call this API -// operation directly, make sure to design your application to parse the contents -// of the response and handle it appropriately. If you use Amazon Web Services -// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply -// error handling per your configuration settings (including automatically retrying -// the request as appropriate). If the condition persists, the SDKs throw an -// exception (or, for the SDKs that don't use exceptions, they return an error). +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the [UploadPart] +// operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of +// an upload, you call this CompleteMultipartUpload operation to complete the +// upload. Upon receiving this request, Amazon S3 concatenates all the parts in +// ascending order by part number to create a new object. In the +// CompleteMultipartUpload request, you must provide the parts list and ensure that +// the parts list is complete. The CompleteMultipartUpload API operation +// concatenates the parts that you provide in the list. For each part in the list, +// you must provide the PartNumber value and the ETag value that are returned +// after that part was uploaded. +// +// The processing of a CompleteMultipartUpload request could take several minutes +// to finalize. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. A request could fail after the initial 200 OK +// response has been sent. This means that a 200 OK response can contain either a +// success or an error. The error response might be embedded in the 200 OK +// response. If you call this API operation directly, make sure to design your +// application to parse the contents of the response and handle it appropriately. +// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect +// the embedded error and apply error handling per your configuration settings +// (including automatically retrying the request as appropriate). If the condition +// persists, the SDKs throw an exception (or, for the SDKs that don't use +// exceptions, they return an error). +// // Note that if CompleteMultipartUpload fails, applications should be prepared to -// retry the failed requests. For more information, see Amazon S3 Error Best -// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) -// . You can't use Content-Type: application/x-www-form-urlencoded for the +// retry any failed requests (including 500 error responses). For more information, +// see [Amazon S3 Error Best Practices]. 
+// +// You can't use Content-Type: application/x-www-form-urlencoded for the // CompleteMultipartUpload requests. Also, if you don't provide a Content-Type -// header, CompleteMultipartUpload can still return a 200 OK response. For more -// information about multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format +// header, CompleteMultipartUpload can still return a 200 OK response. +// +// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions +// // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -65,36 +73,65 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Special errors +// // - Error Code: EntityTooSmall +// // - Description: Your proposed upload is smaller than the minimum allowed // object size. Each part must be at least 5 MB in size, except the last part. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: InvalidPart +// // - Description: One or more of the specified parts could not be found. The // part might not have been uploaded, or the specified ETag might not have matched // the uploaded part's ETag. 
+// // - HTTP Status Code: 400 Bad Request +// // - Error Code: InvalidPartOrder +// // - Description: The list of parts was not in ascending order. The parts list // must be specified in order by part number. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CompleteMultipartUpload : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following operations are related to CompleteMultipartUpload : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { if params == nil { params = &CompleteMultipartUploadInput{} @@ -112,31 +149,39 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu type CompleteMultipartUploadInput struct { - // Name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format + // Name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . 
Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -153,30 +198,34 @@ type CompleteMultipartUploadInput struct { // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -191,38 +240,47 @@ type CompleteMultipartUploadInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. 
+ // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is required only when the object was created using a checksum // algorithm or if your bucket policy requires the use of SSE-C. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -231,13 +289,15 @@ func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. Access points are not - // supported by directory buckets. + // the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be @@ -245,8 +305,10 @@ type CompleteMultipartUploadOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -254,8 +316,10 @@ type CompleteMultipartUploadOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -263,8 +327,10 @@ type CompleteMultipartUploadOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be @@ -272,8 +338,10 @@ type CompleteMultipartUploadOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // Entity tag that identifies the newly created object's data. Objects with @@ -282,12 +350,14 @@ type CompleteMultipartUploadOutput struct { // data. If the entity tag is not an MD5 digest of the object data, it will contain // one or more nonhexadecimal characters and/or will consist of less than 32 or // more than 32 hexadecimal digits. For more information about how the entity tag - // is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // is calculated, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ETag *string // If the object expiration is configured, this will contain the expiration date ( // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. + // // This functionality is not supported for directory buckets. Expiration *string @@ -298,21 +368,28 @@ type CompleteMultipartUploadOutput struct { Location *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // (for example, AES256 , aws:kms ). + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Version ID of the newly created object, in case the bucket has versioning - // turned on. This functionality is not supported for directory buckets. + // turned on. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. 
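Similarly, a short sketch of how a caller assembles the parts list that the CompleteMultipartUpload documentation above requires (one PartNumber and ETag pair per uploaded part, in ascending order). It assumes a configured client and ETags already collected from earlier UploadPart calls; the helper name and package are illustrative and not part of this change.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// completeUpload finishes a multipart upload by supplying the ordered parts
// list. etags[i] must be the ETag returned by UploadPart for part number i+1.
func completeUpload(ctx context.Context, client *s3.Client, bucket, key, uploadID string, etags []string) (*s3.CompleteMultipartUploadOutput, error) {
	parts := make([]types.CompletedPart, 0, len(etags))
	for i, etag := range etags {
		parts = append(parts, types.CompletedPart{
			ETag:       aws.String(etag),
			PartNumber: aws.Int32(int32(i + 1)), // parts must be listed in ascending order
		})
	}
	// Even a 200 OK response can carry an embedded error; the SDK detects it
	// and, after its configured retry handling, returns it as a regular error.
	return client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{Parts: parts},
	})
}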
@@ -343,25 +420,25 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -379,6 +456,15 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -388,7 +474,7 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go index deb21cee0516..5f8275ca6508 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -15,97 +15,133 @@ import ( "time" ) -// Creates a copy of an object that is already stored in Amazon S3. You can store -// individual objects of up to 5 TB in Amazon S3. You create a copy of your object -// up to 5 GB in size in a single atomic action using this API. However, to copy an -// object greater than 5 GB, you must use the multipart upload Upload Part - Copy -// (UploadPartCopy) API. For more information, see Copy Object Using the REST -// Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html) -// . You can copy individual objects between general purpose buckets, between +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a copy +// of your object up to 5 GB in size in a single atomic action using this API. +// However, to copy an object greater than 5 GB, you must use the multipart upload +// Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API]. 
+// +// You can copy individual objects between general purpose buckets, between // directory buckets, and between general purpose buckets and directory buckets. +// // Directory buckets - For directory buckets, you must make requests for this API // operation to the Zonal endpoint. These endpoints support virtual-hosted-style // requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Both the Region that you want to copy the object -// from and the Region that you want to copy the object to must be enabled for your -// account. Amazon S3 transfer acceleration does not support cross-Region copies. -// If you request a cross-Region copy using a transfer acceleration endpoint, you -// get a 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . Authentication and authorization All CopyObject requests must be -// authenticated and signed by using IAM credentials (access key ID and secret -// access key for the IAM identities). All headers with the x-amz- prefix, -// including x-amz-copy-source , must be signed. For more information, see REST -// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory buckets - You must use the IAM credentials to authenticate and +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Both the Region that you want to copy the object from and the Region that you +// want to copy the object to must be enabled for your account. For more +// information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon +// Web Services Account Management Guide. +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If you +// request a cross-Region copy using a transfer acceleration endpoint, you get a +// 400 Bad Request error. For more information, see [Transfer Acceleration]. +// +// Authentication and authorization All CopyObject requests must be authenticated +// and signed by using IAM credentials (access key ID and secret access key for the +// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source +// , must be signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use the IAM credentials to authenticate and // authorize your access to the CopyObject API operation, instead of using the -// temporary security credentials through the CreateSession API operation. Amazon -// Web Services CLI or SDKs handles authentication and authorization on your -// behalf. Permissions You must have read access to the source object and write -// access to the destination bucket. +// temporary security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// +// Permissions You must have read access to the source object and write access to +// the destination bucket. +// // - General purpose bucket permissions - You must have permissions in an IAM // policy based on the source and destination bucket types in a CopyObject // operation. 
+// // - If the source object is in a general purpose bucket, you must have // s3:GetObject permission to read the source object that is being copied. +// // - If the destination bucket is a general purpose bucket, you must have -// s3:PubObject permission to write the object copy to the destination bucket. +// s3:PutObject permission to write the object copy to the destination bucket. +// // - Directory bucket permissions - You must have permissions in a bucket policy // or an IAM identity-based policy based on the source and destination bucket types // in a CopyObject operation. +// // - If the source object that you want to copy is in a directory bucket, you // must have the s3express:CreateSession permission in the Action element of a // policy to read the object. By default, the session is in the ReadWrite mode. // If you want to restrict the access, you can explicitly set the // s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// // - If the copy destination is a directory bucket, you must have the // s3express:CreateSession permission in the Action element of a policy to write // the object to the destination. The s3express:SessionMode condition key can't -// be set to ReadOnly on the copy destination bucket. For example policies, see -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// be set to ReadOnly on the copy destination bucket. +// +// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. // // Response and special errors When the request is an HTTP 1.1 request, the // response is chunk encoded. When the request is not an HTTP 1.1 request, the // response would not contain the Content-Length . You always need to read the -// entire response body to check if the copy succeeds. to keep the connection alive -// while we copy the data. +// entire response body to check if the copy succeeds. +// // - If the copy is successful, you receive a response with information about // the copied object. +// // - A copy request might return an error when Amazon S3 receives the copy // request or while Amazon S3 is copying the files. A 200 OK response can contain // either a success or an error. +// // - If the error occurs before the copy action starts, you receive a standard // Amazon S3 error. +// // - If the error occurs during the copy operation, the error response is // embedded in the 200 OK response. For example, in a cross-region copy, you may -// encounter throttling and receive a 200 OK response. For more information, see -// Resolve the Error 200 response when copying objects to Amazon S3 . The 200 OK -// status code means the copy was accepted, but it doesn't mean the copy is -// complete. Another example is when you disconnect from Amazon S3 before the copy -// is complete, Amazon S3 might cancel the copy and you may receive a 200 OK -// response. You must stay connected to Amazon S3 until the entire response is -// successfully received and processed. 
If you call this API operation directly, -// make sure to design your application to parse the content of the response and -// handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this -// condition. The SDKs detect the embedded error and apply error handling per your -// configuration settings (including automatically retrying the request as -// appropriate). If the condition persists, the SDKs throw an exception (or, for -// the SDKs that don't use exceptions, they return an error). +// encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3] +// . The 200 OK status code means the copy was accepted, but it doesn't mean the +// copy is complete. Another example is when you disconnect from Amazon S3 before +// the copy is complete, Amazon S3 might cancel the copy and you may receive a +// 200 OK response. You must stay connected to Amazon S3 until the entire +// response is successfully received and processed. +// +// If you call this API operation directly, make sure to design your application +// +// to parse the content of the response and handle it appropriately. If you use +// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the +// embedded error and apply error handling per your configuration settings +// (including automatically retrying the request as appropriate). If the condition +// persists, the SDKs throw an exception (or, for the SDKs that don't use +// exceptions, they return an error). // // Charge The copy request charge is based on the storage class and Region that // you specify for the destination object. The request can also result in a data // retrieval charge for the source if the source storage class bills for data -// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/) -// . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CopyObject : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// retrieval. If the copy source is in a different region, the data transfer is +// billed to the copy source account. For pricing information, see [Amazon S3 pricing]. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
+// +// The following operations are related to CopyObject : +// +// [PutObject] +// +// [GetObject] +// +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror +// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { if params == nil { params = &CopyObjectInput{} @@ -123,31 +159,39 @@ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns type CopyObjectInput struct { - // The name of the destination bucket. Directory buckets - When you use this - // operation with a directory bucket, you must use virtual-hosted-style requests in - // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style - // requests are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // The name of the destination bucket. 
+ // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -155,10 +199,11 @@ type CopyObjectInput struct { // Specifies the source object for the copy operation. The source object can be up // to 5 GB. If the source object is an object that was uploaded by using a // multipart upload, the object copy will be a single part object after the source - // object is copied to the destination bucket. You specify the value of the copy - // source in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) - // : + // object is copied to the destination bucket. + // + // You specify the value of the copy source in one of two formats, depending on + // whether you want to access the source object through an [access point]: + // // - For objects not accessed through an access point, specify the name of the // source bucket and the key of the source object, separated by a slash (/). 
For // example, to copy the object reports/january.pdf from the general purpose @@ -167,6 +212,7 @@ type CopyObjectInput struct { // bucket awsexamplebucket--use1-az5--x-s3 , use // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be // URL-encoded. + // // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . For example, to copy the object @@ -174,15 +220,20 @@ type CopyObjectInput struct { // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf // . The value must be URL encoded. + // // - Amazon S3 supports copy operations using Access points only when the source // and destination buckets are in the same Amazon Web Services Region. - // - Access points are not supported by directory buckets. Alternatively, for - // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as - // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, - // to copy the object reports/january.pdf through outpost my-outpost owned by - // account 123456789012 in Region us-west-2 , use the URL encoding of + // + // - Access points are not supported by directory buckets. + // + // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + // ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 + // in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. + // // If your source bucket versioning is enabled, the x-amz-copy-source header by // default identifies the current version of an object to copy. If the current // version is a delete marker, Amazon S3 behaves as if the object was deleted. To @@ -190,14 +241,21 @@ type CopyObjectInput struct { // append ?versionId= to the value (for example, // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 // ). If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. If you enable versioning on the destination bucket, Amazon S3 - // generates a unique version ID for the copied object. This version ID is - // different from the version ID of the source object. Amazon S3 returns the - // version ID of the copied object in the x-amz-version-id response header in the - // response. If you do not enable versioning or suspend it on the destination - // bucket, the version ID that Amazon S3 generates in the x-amz-version-id - // response header is always null. Directory buckets - S3 Versioning isn't enabled - // and supported for directory buckets. + // source object. + // + // If you enable versioning on the destination bucket, Amazon S3 generates a + // unique version ID for the copied object. This version ID is different from the + // version ID of the source object. Amazon S3 returns the version ID of the copied + // object in the x-amz-version-id response header in the response. + // + // If you do not enable versioning or suspend it on the destination bucket, the + // version ID that Amazon S3 generates in the x-amz-version-id response header is + // always null. 
+ // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // + // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html // // This member is required. CopySource *string @@ -207,51 +265,67 @@ type CopyObjectInput struct { // This member is required. Key *string - // The canned access control list (ACL) to apply to the object. When you copy an - // object, the ACL metadata is not preserved and is set to private by default. - // Only the owner has full access control. To override the default ACL setting, - // specify a new ACL when you generate a copy request. For more information, see - // Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) - // . If the destination bucket that you're copying objects to uses the bucket owner + // The canned access control list (ACL) to apply to the object. + // + // When you copy an object, the ACL metadata is not preserved and is set to private + // by default. Only the owner has full access control. To override the default ACL + // setting, specify a new ACL when you generate a copy request. For more + // information, see [Using ACLs]. + // + // If the destination bucket that you're copying objects to uses the bucket owner // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect // permissions. Buckets that use this setting only accept PUT requests that don't // specify an ACL or PUT requests that specify bucket owner full control ACLs, // such as the bucket-owner-full-control canned ACL or an equivalent form of this - // ACL expressed in the XML format. For more information, see Controlling - // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. + // ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 + // User Guide. + // // - If your destination bucket uses the bucket owner enforced setting for // Object Ownership, all objects written to the bucket by any account will be owned // by the bucket owner. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. + // // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t - // affect bucket-level settings for S3 Bucket Key. For more information, see - // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. This functionality is not supported when the - // destination bucket is a directory bucket. + // affect bucket-level settings for S3 Bucket Key. + // + // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. 
+ // + // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html BucketKeyEnabled *bool // Specifies the caching behavior along the request/reply chain. CacheControl *string // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. When you copy an object, if the source object has a - // checksum, that checksum value will be copied to the new object by default. If - // the CopyObject request does not include this x-amz-checksum-algorithm header, - // the checksum algorithm will be copied from the source object to the destination - // object (if it's present on the source object). You can optionally specify a - // different checksum algorithm to use with the x-amz-checksum-algorithm header. - // Unrecognized or unsupported values will respond with the HTTP status code 400 - // Bad Request . For directory buckets, when you use Amazon Web Services SDKs, - // CRC32 is the default checksum algorithm that's used for performance. + // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // When you copy an object, if the source object has a checksum, that checksum + // value will be copied to the new object by default. If the CopyObject request + // does not include this x-amz-checksum-algorithm header, the checksum algorithm + // will be copied from the source object to the destination object (if it's present + // on the source object). You can optionally specify a different checksum algorithm + // to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported + // values will respond with the HTTP status code 400 Bad Request . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Specifies presentational information for the object. Indicates whether an @@ -261,8 +335,10 @@ type CopyObjectInput struct { // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For directory buckets, only the aws-chunked value is - // supported in this header field. + // Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string // The language the content is in. @@ -271,62 +347,85 @@ type CopyObjectInput struct { // A standard MIME type that describes the format of the object data. ContentType *string - // Copies the object if its entity tag (ETag) matches the specified tag. If both - // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // Copies the object if its entity tag (ETag) matches the specified tag. 
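As a sketch of overriding the checksum behaviour described for x-amz-checksum-algorithm, the helper below requests a SHA-256 checksum for the copy rather than inheriting the source object's algorithm. It assumes a *s3.Client built as in the earlier sketch; bucket and key names are placeholders.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyWithSHA256 asks Amazon S3 to compute a SHA-256 checksum for the object
// copy instead of carrying over the source object's checksum algorithm.
func copyWithSHA256(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:            aws.String("DOC-EXAMPLE-DESTINATION-BUCKET"),
		Key:               aws.String("reports/january.pdf"),
		CopySource:        aws.String("DOC-EXAMPLE-SOURCE-BUCKET/reports/january.pdf"),
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	return err
}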
+ // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since // headers are present in the request and evaluate as follows, Amazon S3 returns // 200 OK and copies the data: + // // - x-amz-copy-source-if-match condition evaluates to true + // // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. If both the - // x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers - // are present in the request and evaluate as follows, Amazon S3 returns the 412 - // Precondition Failed response code: + // Copies the object if it has been modified since the specified time. + // + // If both the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request and + // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + // code: + // // - x-amz-copy-source-if-none-match condition evaluates to false + // // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. If both the x-amz-copy-source-if-none-match and + // Copies the object if its entity tag (ETag) is different than the specified ETag. + // + // If both the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request and // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response // code: + // // - x-amz-copy-source-if-none-match condition evaluates to false + // // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. If both - // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // Copies the object if it hasn't been modified since the specified time. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since // headers are present in the request and evaluate as follows, Amazon S3 returns // 200 OK and copies the data: + // // - x-amz-copy-source-if-match condition evaluates to true + // // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfUnmodifiedSince *time.Time // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C, - // you must provide the necessary encryption information in your request so that - // Amazon S3 can decrypt the object for copying. This functionality is not - // supported when the source object is in a directory bucket. + // AES256 ). + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be the same - // one that was used when the source object was created. 
If the source object for - // the copy is stored in Amazon S3 using SSE-C, you must provide the necessary - // encryption information in your request so that Amazon S3 can decrypt the object - // for copying. This functionality is not supported when the source object is in a - // directory bucket. + // one that was used when the source object was created. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. If the source object for the copy - // is stored in Amazon S3 using SSE-C, you must provide the necessary encryption - // information in your request so that Amazon S3 can decrypt the object for - // copying. This functionality is not supported when the source object is in a - // directory bucket. + // encryption key was transmitted without error. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string // The account ID of the expected destination bucket owner. If the account ID that @@ -343,22 +442,30 @@ type CopyObjectInput struct { Expires *time.Time // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string // Allows grantee to read the object data and its metadata. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string // Allows grantee to read the object ACL. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to write the ACL for the applicable object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string @@ -368,26 +475,32 @@ type CopyObjectInput struct { // Specifies whether the metadata is copied from the source object or replaced // with metadata that's provided in the request. When copying an object, you can // preserve all metadata (the default) or specify new metadata. If this header - // isn’t specified, COPY is the default behavior. General purpose bucket - For - // general purpose buckets, when you grant permissions, you can use the - // s3:x-amz-metadata-directive condition key to enforce certain metadata behavior - // when objects are uploaded. For more information, see Amazon S3 condition key - // examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) - // in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each - // object and is not copied when using the x-amz-metadata-directive header. 
To - // copy the value, you must specify x-amz-website-redirect-location in the request - // header. + // isn’t specified, COPY is the default behavior. + // + // General purpose bucket - For general purpose buckets, when you grant + // permissions, you can use the s3:x-amz-metadata-directive condition key to + // enforce certain metadata behavior when objects are uploaded. For more + // information, see [Amazon S3 condition key examples]in the Amazon S3 User Guide. + // + // x-amz-website-redirect-location is unique to each object and is not copied when + // using the x-amz-metadata-directive header. To copy the value, you must specify + // x-amz-website-redirect-location in the request header. + // + // [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html MetadataDirective types.MetadataDirective - // Specifies whether you want to apply a legal hold to the object copy. This - // functionality is not supported for directory buckets. + // Specifies whether you want to apply a legal hold to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to the object copy. This - // functionality is not supported for directory buckets. + // The Object Lock mode that you want to apply to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode // The date and time when you want the Object Lock of the object copy to expire. + // // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time @@ -395,19 +508,23 @@ type CopyObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256 - // ). When you perform a CopyObject operation, if you want to use a different type - // of encryption setting for the target object, you can specify appropriate + // Specifies the algorithm to use when encrypting the object (for example, AES256 ). + // + // When you perform a CopyObject operation, if you want to use a different type of + // encryption setting for the target object, you can specify appropriate // encryption-related headers to encrypt the target object with an Amazon S3 // managed key, a KMS key, or a customer-provided key. If the encryption setting in // your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. 
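To make the COPY versus REPLACE distinction for x-amz-metadata-directive concrete, the sketch below replaces the metadata on the copy instead of preserving the source object's metadata (the default). It assumes a client built as in the earlier sketch; the metadata key and the bucket and key names are illustrative.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// copyWithNewMetadata sets MetadataDirective to REPLACE so that the metadata
// supplied here is stored on the copy; with the COPY default the Metadata map
// would be ignored and the source object's metadata carried over.
func copyWithNewMetadata(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:            aws.String("DOC-EXAMPLE-DESTINATION-BUCKET"),
		Key:               aws.String("reports/january.pdf"),
		CopySource:        aws.String("DOC-EXAMPLE-SOURCE-BUCKET/reports/january.pdf"),
		MetadataDirective: types.MetadataDirectiveReplace,
		Metadata:          map[string]string{"department": "finance"},
	})
	return err
}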
+ // // This functionality is not supported when the destination bucket is a directory // bucket. SSECustomerAlgorithm *string @@ -416,38 +533,49 @@ type CopyObjectInput struct { // encrypting data. This value is used to store the object and then it is // discarded. Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported when the destination bucket is a directory bucket. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the destination bucket is a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding // JSON with the encryption context key-value pairs. This value must be explicitly - // added to specify encryption context for CopyObject requests. This functionality - // is not supported when the destination bucket is a directory bucket. + // added to specify encryption context for CopyObject requests. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSEKMSEncryptionContext *string // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object // encryption. All GET and PUT requests for an object protected by KMS will fail if // they're not made via SSL or using SigV4. For information about configuring any // of the officially supported Amazon Web Services SDKs and Amazon Web Services - // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. This functionality is not supported when the - // destination bucket is a directory bucket. + // CLI, see [Specifying the Signature Version in Request Authentication]in the Amazon S3 User Guide. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 // (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported // values won’t write a destination object and will receive a 400 Bad Request - // response. Amazon S3 automatically encrypts all new objects that are copied to an - // S3 bucket. When copying an object, if you don't specify encryption information - // in your copy request, the encryption setting of the target object is set to the + // response. + // + // Amazon S3 automatically encrypts all new objects that are copied to an S3 + // bucket. 
When copying an object, if you don't specify encryption information in + // your copy request, the encryption setting of the target object is set to the // default encryption configuration of the destination bucket. By default, all // buckets have a base level of encryption configuration that uses server-side // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a @@ -455,42 +583,55 @@ type CopyObjectInput struct { // Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with // Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS - // key, or a customer-provided key to encrypt the target object copy. When you - // perform a CopyObject operation, if you want to use a different type of + // key, or a customer-provided key to encrypt the target object copy. + // + // When you perform a CopyObject operation, if you want to use a different type of // encryption setting for the target object, you can specify appropriate // encryption-related headers to encrypt the target object with an Amazon S3 // managed key, a KMS key, or a customer-provided key. If the encryption setting in // your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. - // With server-side encryption, Amazon S3 encrypts your data as it writes your data - // to disks in its data centers and decrypts the data when you access it. For more - // information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) - // in the Amazon S3 User Guide. For directory buckets, only server-side encryption - // with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // + // With server-side encryption, Amazon S3 encrypts your data as it writes your + // data to disks in its data centers and decrypts the data when you access it. For + // more information about server-side encryption, see [Using Server-Side Encryption]in the Amazon S3 User Guide. + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. + // + // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html ServerSideEncryption types.ServerSideEncryption // If the x-amz-storage-class header is not used, the copied object will be stored // in the STANDARD Storage Class by default. The STANDARD storage class provides // high durability and high availability. Depending on performance needs, you can // specify a different Storage Class. + // // - Directory buckets - For directory buckets, only the S3 Express One Zone // storage class is supported to store newly created objects. Unsupported storage // class values won't write a destination object and will respond with the HTTP // status code 400 Bad Request . + // // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. + // // You can use the CopyObject action to change the storage class of an object that // is already stored in Amazon S3 by using the x-amz-storage-class header. For - // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. 
Before using an object as a source object for the - // copy operation, you must restore a copy of it if it meets any of the following - // conditions: + // more information, see [Storage Classes]in the Amazon S3 User Guide. + // + // Before using an object as a source object for the copy operation, you must + // restore a copy of it if it meets any of the following conditions: + // // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . - // - The storage class of the source object is INTELLIGENT_TIERING and it's S3 - // Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) - // is Archive Access or Deep Archive Access . - // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) - // in the Amazon S3 User Guide. + // + // - The storage class of the source object is INTELLIGENT_TIERING and it's [S3 Intelligent-Tiering access tier]is + // Archive Access or Deep Archive Access . + // + // For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + // [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html + // [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition StorageClass types.StorageClass // The tag-set for the object copy in the destination bucket. This value must be @@ -498,60 +639,82 @@ type CopyObjectInput struct { // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive // , you don't need to set the x-amz-tagging header, because the tag-set will be // copied from the source object directly. The tag-set must be encoded as URL Query - // parameters. The default value is the empty value. Directory buckets - For - // directory buckets in a CopyObject operation, only the empty tag-set is - // supported. Any requests that attempt to write non-empty tags into directory - // buckets will receive a 501 Not Implemented status code. When the destination - // bucket is a directory bucket, you will receive a 501 Not Implemented response - // in any of the following situations: + // parameters. + // + // The default value is the empty value. + // + // Directory buckets - For directory buckets in a CopyObject operation, only the + // empty tag-set is supported. Any requests that attempt to write non-empty tags + // into directory buckets will receive a 501 Not Implemented status code. When the + // destination bucket is a directory bucket, you will receive a 501 Not Implemented + // response in any of the following situations: + // // - When you attempt to COPY the tag-set from an S3 source object that has // non-empty tags. + // // - When you attempt to REPLACE the tag-set of a source object and set a // non-empty value to x-amz-tagging . + // // - When you don't set the x-amz-tagging-directive header and the source object // has non-empty tags. This is because the default value of // x-amz-tagging-directive is COPY . 
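The x-amz-storage-class note above says a CopyObject call can change the storage class of an object already stored in Amazon S3; the sketch below does that by copying the object onto itself. It assumes a client built as in the first sketch and a general purpose bucket; names are placeholders.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// moveToStandardIA copies reports/january.pdf onto itself with a new storage
// class, which is the documented way to transition an existing object with
// CopyObject; changing the storage class is what makes the in-place copy valid.
func moveToStandardIA(ctx context.Context, client *s3.Client) error {
	_, err := client.CopyObject(ctx, &s3.CopyObjectInput{
		Bucket:       aws.String("DOC-EXAMPLE-BUCKET"),
		Key:          aws.String("reports/january.pdf"),
		CopySource:   aws.String("DOC-EXAMPLE-BUCKET/reports/january.pdf"),
		StorageClass: types.StorageClassStandardIa,
	})
	return err
}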
+ // // Because only the empty tag-set is supported for directory buckets in a // CopyObject operation, the following situations are allowed: + // // - When you attempt to COPY the tag-set from a directory bucket source object // that has no tags to a general purpose bucket. It copies an empty tag-set to the // destination object. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and set the x-amz-tagging value of the directory bucket destination object to // empty. + // // - When you attempt to REPLACE the tag-set of a general purpose bucket source // object that has non-empty tags and set the x-amz-tagging value of the // directory bucket destination object to empty. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and don't set the x-amz-tagging value of the directory bucket destination // object. This is because the default value of x-amz-tagging is the empty value. Tagging *string // Specifies whether the object tag-set is copied from the source object or - // replaced with the tag-set that's provided in the request. The default value is - // COPY . Directory buckets - For directory buckets in a CopyObject operation, - // only the empty tag-set is supported. Any requests that attempt to write - // non-empty tags into directory buckets will receive a 501 Not Implemented status - // code. When the destination bucket is a directory bucket, you will receive a 501 - // Not Implemented response in any of the following situations: + // replaced with the tag-set that's provided in the request. + // + // The default value is COPY . + // + // Directory buckets - For directory buckets in a CopyObject operation, only the + // empty tag-set is supported. Any requests that attempt to write non-empty tags + // into directory buckets will receive a 501 Not Implemented status code. When the + // destination bucket is a directory bucket, you will receive a 501 Not Implemented + // response in any of the following situations: + // // - When you attempt to COPY the tag-set from an S3 source object that has // non-empty tags. + // // - When you attempt to REPLACE the tag-set of a source object and set a // non-empty value to x-amz-tagging . + // // - When you don't set the x-amz-tagging-directive header and the source object // has non-empty tags. This is because the default value of // x-amz-tagging-directive is COPY . + // // Because only the empty tag-set is supported for directory buckets in a // CopyObject operation, the following situations are allowed: + // // - When you attempt to COPY the tag-set from a directory bucket source object // that has no tags to a general purpose bucket. It copies an empty tag-set to the // destination object. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and set the x-amz-tagging value of the directory bucket destination object to // empty. + // // - When you attempt to REPLACE the tag-set of a general purpose bucket source // object that has non-empty tags and set the x-amz-tagging value of the // directory bucket destination object to empty. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and don't set the x-amz-tagging value of the directory bucket destination // object. This is because the default value of x-amz-tagging is the empty value. @@ -562,71 +725,87 @@ type CopyObjectInput struct { // Amazon S3 stores the value of this header in the object metadata. 
This value is // unique to each object and is not copied when using the x-amz-metadata-directive // header. Instead, you may opt to provide this header in combination with the - // x-amz-metadata-directive header. This functionality is not supported for - // directory buckets. + // x-amz-metadata-directive header. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.CopySource = in.CopySource + p.Key = in.Key p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // Container for all response elements. CopyObjectResult *types.CopyObjectResult - // Version ID of the source object that was copied. This functionality is not - // supported when the source object is in a directory bucket. + // Version ID of the source object that was copied. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string - // If the object expiration is configured, the response includes this header. This - // functionality is not supported for directory buckets. + // If the object expiration is configured, the response includes this header. + // + // This functionality is not supported for directory buckets. Expiration *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This functionality is - // not supported for directory buckets. + // holding JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. SSEKMSEncryptionContext *string // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. 
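Because the response members above are optional pointers, a small helper that inspects a CopyObjectOutput makes the behaviour easier to see; the version IDs stay nil unless the buckets involved are versioned general purpose buckets. This builds on the earlier sketches.

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// logCopyResult prints the fields a caller most often needs after a copy:
// the new object's ETag plus the source and destination version IDs.
func logCopyResult(out *s3.CopyObjectOutput) {
	if out.CopyObjectResult != nil {
		fmt.Println("etag:", aws.ToString(out.CopyObjectResult.ETag))
	}
	fmt.Println("source object version:", aws.ToString(out.CopySourceVersionId))
	fmt.Println("copied object version:", aws.ToString(out.VersionId))
}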
+ // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption - // Version ID of the newly created copy. This functionality is not supported for - // directory buckets. + // Version ID of the newly created copy. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -657,25 +836,25 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -693,6 +872,15 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpCopyObjectValidationMiddleware(stack); err != nil { return err } @@ -702,7 +890,7 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addCopyObjectUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go index 6357444ba102..6334c1cfcf9a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -15,82 +15,116 @@ import ( ) // This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts -// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) -// . Creates a new S3 bucket. 
To create a bucket, you must set up Amazon S3 and -// have a valid Amazon Web Services Access Key ID to authenticate requests. -// Anonymous requests are never allowed to create buckets. By creating the bucket, -// you become the bucket owner. There are two types of buckets: general purpose -// buckets and directory buckets. For more information about these bucket types, -// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) -// in the Amazon S3 User Guide. +// bucket, see [CreateBucket]CreateBucket . +// +// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have +// a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous +// requests are never allowed to create buckets. By creating the bucket, you become +// the bucket owner. +// +// There are two types of buckets: general purpose buckets and directory buckets. +// For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets]in the Amazon S3 User Guide. +// // - General purpose buckets - If you send your CreateBucket request to the // s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So // the signature calculations in Signature Version 4 must use us-east-1 as the // Region, even if the location constraint in the request specifies another Region // where the bucket is to be created. If you create a bucket in a Region other than // US East (N. Virginia), your application must be able to handle 307 redirect. For -// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) -// in the Amazon S3 User Guide. +// more information, see [Virtual hosting of buckets]in the Amazon S3 User Guide. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Regional endpoint. These endpoints support path-style // requests in the format // https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in +// the Amazon S3 User Guide. // // Permissions +// // - General purpose bucket permissions - In addition to the s3:CreateBucket // permission, the following permissions are required in a policy when your // CreateBucket request includes specific headers: +// // - Access control lists (ACLs) - In your CreateBucket request, if you specify // an access control list (ACL) and set it to public-read , public-read-write , // authenticated-read , or if you explicitly specify any other custom ACLs, both // s3:CreateBucket and s3:PutBucketAcl permissions are required. In your // CreateBucket request, if you set the ACL to private , or if you don't specify // any ACLs, only the s3:CreateBucket permission is required. +// // - Object Lock - In your CreateBucket request, if you set // x-amz-bucket-object-lock-enabled to true, the // s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are // required. +// // - S3 Object Ownership - If your CreateBucket request includes the // x-amz-object-ownership header, then the s3:PutBucketOwnershipControls -// permission is required. 
If your CreateBucket request sets BucketOwnerEnforced -// for Amazon S3 Object Ownership and specifies a bucket ACL that provides access -// to an external Amazon Web Services account, your request fails with a 400 -// error and returns the InvalidBucketAcLWithObjectOwnership error code. For more -// information, see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html) -// in the Amazon S3 User Guide. -// - S3 Block Public Access - If your specific use case requires granting public -// access to your S3 resources, you can disable Block Public Access. Specifically, -// you can create a new bucket with Block Public Access enabled, then separately -// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about S3 Block Public Access, see Blocking -// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - You must have the s3express:CreateBucket -// permission in an IAM identity-based policy instead of a bucket policy. -// Cross-account access to this API operation isn't supported. This operation can -// only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see Amazon -// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 Object -// Ownership, and S3 Block Public Access are not supported for directory buckets. -// For directory buckets, all Block Public Access settings are enabled at the -// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs -// disabled). These settings can't be modified. For more information about -// permissions for creating and working with directory buckets, see Directory -// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. For more information about supported S3 features -// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) -// in the Amazon S3 User Guide. +// permission is required. +// +// To set an ACL on a bucket as part of a CreateBucket request, you must explicitly +// +// set S3 Object Ownership for the bucket to a different value than the default, +// BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public +// access, you must first create the bucket (without the bucket ACL) and then +// explicitly disable Block Public Access on the bucket before using PutBucketAcl +// to set the ACL. If you try to create a bucket with a public ACL, the request +// will fail. +// +// For the majority of modern use cases in S3, we recommend that you keep all +// +// Block Public Access settings enabled and keep ACLs disabled. If you would like +// to share data with users outside of your account, you can use bucket policies as +// needed. 
For more information, see [Controlling ownership of objects and disabling ACLs for your bucket]and [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. +// +// - S3 Block Public Access - If your specific use case requires granting public +// access to your S3 resources, you can disable Block Public Access. Specifically, +// you can create a new bucket with Block Public Access enabled, then separately +// call the [DeletePublicAccessBlock]DeletePublicAccessBlock API. To use this operation, you must have the +// s3:PutBucketPublicAccessBlock permission. For more information about S3 Block +// Public Access, see [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the +// Amazon S3 User Guide. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// CreateBucket : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public +// +// Access are not supported for directory buckets. For directory buckets, all Block +// Public Access settings are enabled at the bucket level and S3 Object Ownership +// is set to Bucket owner enforced (ACLs disabled). These settings can't be +// modified. +// +// For more information about permissions for creating and working with directory +// +// buckets, see [Directory buckets]in the Amazon S3 User Guide. For more information about +// supported S3 features for directory buckets, see [Features of S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . 
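Following the guidance above (keep ACLs disabled and Block Public Access enabled, and use bucket policies for sharing), a minimal CreateBucket call only needs a bucket name, a location constraint outside us-east-1, and, optionally, an explicit ObjectOwnership setting. The sketch assumes a client built as in the first sketch; the bucket name and Region are placeholders.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// createBucket creates a general purpose bucket in us-west-2. No ACL header is
// sent and Block Public Access is left at its default (enabled), matching the
// recommended posture; BucketOwnerEnforced is already the default ownership
// setting and is spelled out only for clarity.
func createBucket(ctx context.Context, client *s3.Client) error {
	_, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String("DOC-EXAMPLE-BUCKET"),
		CreateBucketConfiguration: &types.CreateBucketConfiguration{
			LocationConstraint: types.BucketLocationConstraintUsWest2,
		},
		ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
	})
	return err
}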
+// +// The following operations are related to CreateBucket : +// +// [PutObject] +// +// [DeleteBucket] +// +// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html +// [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html +// +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html +// [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features +// [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { if params == nil { params = &CreateBucketInput{} @@ -108,77 +142,100 @@ func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, op type CreateBucketInput struct { - // The name of the bucket to create. General purpose buckets - For information - // about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) - // in the Amazon S3 User Guide. Directory buckets - When you use this operation - // with a directory bucket, you must use path-style requests in the format + // The name of the bucket to create. + // + // General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules] + // in the Amazon S3 User Guide. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format // https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). 
For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + // Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html // // This member is required. Bucket *string - // The canned ACL to apply to the bucket. This functionality is not supported for - // directory buckets. + // The canned ACL to apply to the bucket. + // + // This functionality is not supported for directory buckets. ACL types.BucketCannedACL // The configuration information for the bucket. CreateBucketConfiguration *types.CreateBucketConfiguration // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This functionality is not supported for directory buckets. + // bucket. + // + // This functionality is not supported for directory buckets. GrantFullControl *string - // Allows grantee to list the objects in the bucket. This functionality is not - // supported for directory buckets. + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for directory buckets. GrantRead *string - // Allows grantee to read the bucket ACL. This functionality is not supported for - // directory buckets. + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for directory buckets. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. This functionality is not supported for directory buckets. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. + // + // This functionality is not supported for directory buckets. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This functionality - // is not supported for directory buckets. + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for directory buckets. GrantWriteACP *string // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // // This functionality is not supported for directory buckets. ObjectLockEnabledForBucket *bool // The container element for object ownership for a bucket's ownership controls. + // // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. ObjectWriter - The uploading account will own the object if the - // object is uploaded with the bucket-owner-full-control canned ACL. + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't // specify an ACL or specify bucket owner full control ACLs (such as the predefined // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). 
By default, ObjectOwnership is set to BucketOwnerEnforced - // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon - // use cases where you must control access for each object individually. For more - // information about S3 Object Ownership, see Controlling ownership of objects and - // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Directory buckets use the bucket owner enforced setting for S3 Object - // Ownership. + // the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + // you must control access for each object individually. For more information about + // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ObjectOwnership types.ObjectOwnership noSmithyDocumentSerde } func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) p.DisableAccessPoints = ptr.Bool(true) @@ -217,25 +274,25 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -253,6 +310,15 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpCreateBucketValidationMiddleware(stack); err != nil { return err } @@ -262,7 +328,7 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addCreateBucketUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go index 4f24e11c4646..a3d6ef9e4c3d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -16,62 +16,64 @@ import ( // This action initiates a multipart upload and returns an upload ID. This upload // ID is used to associate all of the parts in the specific multipart upload. You -// specify this upload ID in each of your subsequent upload part requests (see -// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// ). You also include this upload ID in the final request to either complete or -// abort the multipart upload request. For more information about multipart -// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide. After you initiate a multipart upload and upload -// one or more parts, to stop being charged for storing the uploaded parts, you -// must either complete or abort the multipart upload. Amazon S3 frees up the space -// used to store the parts and stops charging you for storing them only after you -// either complete or abort a multipart upload. If you have configured a lifecycle -// rule to abort incomplete multipart uploads, the created multipart upload must be -// completed within the number of days specified in the bucket lifecycle -// configuration. Otherwise, the incomplete multipart upload becomes eligible for -// an abort action and Amazon S3 aborts the multipart upload. For more information, -// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// . +// specify this upload ID in each of your subsequent upload part requests (see [UploadPart]). +// You also include this upload ID in the final request to either complete or abort +// the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview] +// in the Amazon S3 User Guide. +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or abort +// the multipart upload. Amazon S3 frees up the space used to store the parts and +// stops charging you for storing them only after you either complete or abort a +// multipart upload. +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the created multipart upload must be completed within the number of days +// specified in the bucket lifecycle configuration. Otherwise, the incomplete +// multipart upload becomes eligible for an abort action and Amazon S3 aborts the +// multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. +// // - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. 
For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the +// Amazon S3 User Guide. // // Request signing For request signing, multipart upload is just a series of // regular requests. You initiate a multipart upload, send one or more requests to // upload parts, and then complete the multipart upload process. You sign each // request individually. There is nothing special about signing multipart upload -// requests. For more information about signing, see Authenticating Requests -// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// in the Amazon S3 User Guide. Permissions +// requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)]in the Amazon S3 User Guide. +// +// Permissions // - General purpose bucket permissions - For information about the permissions -// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. To perform a multipart upload with encryption by -// using an Amazon Web Services KMS key, the requester must have permission to the -// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are -// required because Amazon S3 must decrypt and read data from the encrypted file -// parts before it completes the multipart upload. For more information, see -// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// required to use the multipart upload API, see [Multipart upload and permissions]in the Amazon S3 User Guide. +// +// To perform a multipart upload with encryption by using an Amazon Web Services +// +// KMS key, the requester must have permission to the kms:Decrypt and +// kms:GenerateDataKey* actions on the key. These permissions are required +// because Amazon S3 must decrypt and read data from the encrypted file parts +// before it completes the multipart upload. 
For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in +// the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Encryption +// // - General purpose buckets - Server-side encryption is for data encryption at // rest. Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. Amazon S3 automatically encrypts all new @@ -91,61 +93,96 @@ import ( // in your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. If // you choose to provide your own encryption key, the request headers you provide -// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the CreateMultipartUpload request. +// in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload +// request. +// // - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( // aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) // – If you want Amazon Web Services to manage the keys used to encrypt data, // specify the following headers in the request. +// // - x-amz-server-side-encryption +// // - x-amz-server-side-encryption-aws-kms-key-id +// // - x-amz-server-side-encryption-context +// // - If you specify x-amz-server-side-encryption:aws:kms , but don't provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web // Services managed key ( aws/s3 key) in KMS to protect the data. +// // - To perform a multipart upload with encryption by using an Amazon Web // Services KMS key, the requester must have permission to the kms:Decrypt and // kms:GenerateDataKey* actions on the key. These permissions are required // because Amazon S3 must decrypt and read data from the encrypted file parts -// before it completes the multipart upload. For more information, see Multipart -// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. +// before it completes the multipart upload. 
For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in +// the Amazon S3 User Guide. +// // - If your Identity and Access Management (IAM) user or role is in the same // Amazon Web Services account as the KMS key, then you must have these permissions // on the key policy. If your IAM user or role is in a different account from the // key, then you must have the permissions on both the key policy and your IAM user // or role. +// // - All GET and PUT requests for an object protected by KMS fail if you don't // make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), // or Signature Version 4. For information about configuring any of the officially -// supported Amazon Web Services SDKs and Amazon Web Services CLI, see -// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) -// in the Amazon S3 User Guide. For more information about server-side -// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. -// - Use customer-provided encryption keys (SSE-C) – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 For more information about -// server-side encryption with customer-provided encryption keys (SSE-C), see -// Protecting data using server-side encryption with customer-provided encryption -// keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. -// - Directory buckets -For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CreateMultipartUpload : -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the +// Amazon S3 User Guide. +// +// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys] +// +// in the Amazon S3 User Guide. +// +// - Use customer-provided encryption keys (SSE-C) – If you want to manage your +// own encryption keys, provide all the following headers in the request. 
+// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about server-side encryption with customer-provided +// +// encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide. +// +// - Directory buckets -For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following operations are related to CreateMultipartUpload : +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version +// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config +// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html +// [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { if params == nil { params = &CreateMultipartUploadInput{} @@ -164,30 +201,39 @@ func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultip type CreateMultipartUploadInput struct { // The name of the bucket where the multipart upload is initiated and where the - // object is uploaded. 
Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format + // object is uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
+ // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -199,32 +245,41 @@ type CreateMultipartUploadInput struct { // The canned ACL to apply to the object. Amazon S3 supports a set of predefined // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and - // permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // in the Amazon S3 User Guide. By default, all objects are private. Only the owner - // has full access control. When uploading an object, you can grant access - // permissions to individual Amazon Web Services accounts or to predefined groups - // defined by Amazon S3. These permissions are then added to the access control - // list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) - // . One way to grant the permissions using the request headers is to specify a - // canned ACL with the x-amz-acl request header. + // permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can grant access permissions to individual Amazon + // Web Services accounts or to predefined groups defined by Amazon S3. These + // permissions are then added to the access control list (ACL) on the new object. + // For more information, see [Using ACLs]. One way to grant the permissions using the request + // headers is to specify a canned ACL with the x-amz-acl request header. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with an object action doesn’t - // affect bucket-level settings for S3 Bucket Key. This functionality is not - // supported for directory buckets. + // encryption with SSE-KMS. + // + // Specifying this header with an object action doesn’t affect bucket-level + // settings for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Specifies presentational information for the object. @@ -232,8 +287,10 @@ type CreateMultipartUploadInput struct { // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For directory buckets, only the aws-chunked value is - // supported in this header field. + // Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string // The language that the content is in. @@ -251,213 +308,329 @@ type CreateMultipartUploadInput struct { Expires *time.Time // Specify access permissions explicitly to give the grantee READ, READ_ACP, and - // WRITE_ACP permissions on the object. By default, all objects are private. Only - // the owner has full access control. When uploading an object, you can use this - // header to explicitly grant access permissions to specific Amazon Web Services - // accounts or groups. This header maps to specific permissions that Amazon S3 - // supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // WRITE_ACP permissions on the object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. 
+ // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantFullControl *string // Specify access permissions explicitly to allow grantee to read the object data - // and its metadata. By default, all objects are private. Only the owner has full - // access control. When uploading an object, you can use this header to explicitly - // grant access permissions to specific Amazon Web Services accounts or groups. - // This header maps to specific permissions that Amazon S3 supports in an ACL. For - // more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // and its metadata. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. 
+ // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantRead *string // Specify access permissions explicitly to allows grantee to read the object ACL. + // // By default, all objects are private. Only the owner has full access control. // When uploading an object, you can use this header to explicitly grant access // permissions to specific Amazon Web Services accounts or groups. This header maps // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantReadACP *string // Specify access permissions explicitly to allows grantee to allow grantee to - // write the ACL for the applicable object. By default, all objects are private. - // Only the owner has full access control. When uploading an object, you can use - // this header to explicitly grant access permissions to specific Amazon Web - // Services accounts or groups. This header maps to specific permissions that - // Amazon S3 supports in an ACL. 
For more information, see Access Control List - // (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // write the ACL for the applicable object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantWriteACP *string // A map of metadata to store with the object in S3. Metadata map[string]string - // Specifies whether you want to apply a legal hold to the uploaded object. This - // functionality is not supported for directory buckets. + // Specifies whether you want to apply a legal hold to the uploaded object. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Specifies the Object Lock mode that you want to apply to the uploaded object. + // // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // Specifies the date and time when you want the Object Lock to expire. This - // functionality is not supported for directory buckets. + // Specifies the date and time when you want the Object Lock to expire. 
+ // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the customer-provided encryption key // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. This - // functionality is not supported for directory buckets. + // to ensure that the encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This functionality is not - // supported for directory buckets. + // JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. SSEKMSEncryptionContext *string // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption - // customer managed key to use for object encryption. This functionality is not - // supported for directory buckets. + // customer managed key to use for object encryption. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). 
+ // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. + // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + // // - For directory buckets, only the S3 Express One Zone storage class is // supported to store newly created objects. + // // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. This functionality is not supported for directory buckets. + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // + // This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -469,27 +642,33 @@ type CreateMultipartUploadOutput struct { // incomplete multipart uploads and the prefix in the lifecycle rule matches the // object name in the request, the response includes this header. The header // indicates when the initiated multipart upload becomes eligible for an abort - // operation. For more information, see Aborting Incomplete Multipart Uploads - // Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id - // header that provides the ID of the lifecycle configuration rule that defines the - // abort action. This functionality is not supported for directory buckets. + // operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. + // + // The response also includes the x-amz-abort-rule-id header that provides the ID + // of the lifecycle configuration rule that defines the abort action. + // + // This functionality is not supported for directory buckets. + // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. This functionality is not supported for directory - // buckets. 
+ // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. Access points are not - // supported by directory buckets. + // return the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // The algorithm that was used to create a checksum of the object. @@ -499,35 +678,43 @@ type CreateMultipartUploadOutput struct { Key *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This functionality is - // not supported for directory buckets. + // holding JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. SSEKMSEncryptionContext *string // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // ID for the initiated multipart upload. @@ -561,25 +748,25 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. 
if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -597,6 +784,15 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -606,7 +802,7 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go index 1cb1f1512967..b8c1736b4048 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go @@ -17,62 +17,72 @@ import ( // Creates a session that establishes temporary security credentials to support // fast authentication and authorization for the Zonal endpoint APIs on directory // buckets. For more information about Zonal endpoint APIs that include the -// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) -// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory -// bucket, use the CreateSession API operation. Specifically, you grant -// s3express:CreateSession permission to a bucket in a bucket policy or an IAM -// identity-based policy. Then, you use IAM credentials to make the CreateSession -// API request on the bucket, which returns temporary security credentials that -// include the access key ID, secret access key, session token, and expiration. -// These credentials have associated permissions to access the Zonal endpoint APIs. -// After the session is created, you don’t need to use other policies to grant -// permissions to each Zonal endpoint API individually. 
Instead, in your Zonal -// endpoint API requests, you sign your requests by applying the temporary security -// credentials of the session to the request headers and following the SigV4 -// protocol for authentication. You also apply the session token to the -// x-amz-s3session-token request header for authorization. Temporary security -// credentials are scoped to the bucket and expire after 5 minutes. After the -// expiration time, any calls that you make with those credentials will fail. You -// must use IAM credentials again to make a CreateSession API request that -// generates a new set of temporary credentials for use. Temporary credentials -// cannot be extended or refreshed beyond the original specified interval. If you -// use Amazon Web Services SDKs, SDKs handle the session token refreshes +// Availability Zone in the request endpoint, see [S3 Express One Zone APIs]in the Amazon S3 User Guide. +// +// To make Zonal endpoint API requests on a directory bucket, use the CreateSession +// API operation. Specifically, you grant s3express:CreateSession permission to a +// bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM +// credentials to make the CreateSession API request on the bucket, which returns +// temporary security credentials that include the access key ID, secret access +// key, session token, and expiration. These credentials have associated +// permissions to access the Zonal endpoint APIs. After the session is created, you +// don’t need to use other policies to grant permissions to each Zonal endpoint API +// individually. Instead, in your Zonal endpoint API requests, you sign your +// requests by applying the temporary security credentials of the session to the +// request headers and following the SigV4 protocol for authentication. You also +// apply the session token to the x-amz-s3session-token request header for +// authorization. Temporary security credentials are scoped to the bucket and +// expire after 5 minutes. After the expiration time, any calls that you make with +// those credentials will fail. You must use IAM credentials again to make a +// CreateSession API request that generates a new set of temporary credentials for +// use. Temporary credentials cannot be extended or refreshed beyond the original +// specified interval. +// +// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes // automatically to avoid service interruptions when a session expires. We // recommend that you use the Amazon Web Services SDKs to initiate and manage -// requests to the CreateSession API. For more information, see Performance -// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) -// in the Amazon S3 User Guide. +// requests to the CreateSession API. For more information, see [Performance guidelines and design patterns]in the Amazon S3 +// User Guide. +// // - You must make requests for this API operation to the Zonal endpoint. These // endpoints support virtual-hosted-style requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests -// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// are not supported. 
For more information, see [Regional and Zonal endpoints]in the Amazon S3 User Guide. +// // - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject // API operation doesn't use the temporary security credentials returned from the // CreateSession API operation for authentication and authorization. For // information about authentication and authorization of the CopyObject API -// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// . +// operation on directory buckets, see [CopyObject]. +// // - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket // API operation doesn't use the temporary security credentials returned from the // CreateSession API operation for authentication and authorization. For // information about authentication and authorization of the HeadBucket API -// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) -// . +// operation on directory buckets, see [HeadBucket]. // // Permissions To obtain temporary security credentials, you must create a bucket // policy or an IAM identity-based policy that grants s3express:CreateSession // permission to the bucket. In a policy, you can have the s3express:SessionMode // condition key to control who can create a ReadWrite or ReadOnly session. For -// more information about ReadWrite or ReadOnly sessions, see -// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters) -// . For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint -// APIs, the bucket policy should also grant both accounts the -// s3express:CreateSession permission. HTTP Host header syntax Directory buckets - -// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// more information about ReadWrite or ReadOnly sessions, see [x-amz-create-session-mode] +// x-amz-create-session-mode . For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 +// User Guide. +// +// To grant cross-account access to Zonal endpoint APIs, the bucket policy should +// also grant both accounts the s3express:CreateSession permission. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
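For orientation, a minimal sketch of calling the regenerated CreateSession wrapper directly from application code; the directory-bucket name is a placeholder, and in normal use the SDK creates and refreshes these sessions automatically, as described above:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Load shared configuration (region, credentials) and build the S3 client.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Directory-bucket name is a placeholder (bucket_base_name--az-id--x-s3).
	out, err := client.CreateSession(ctx, &s3.CreateSessionInput{
		Bucket: aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// out.Credentials (*types.SessionCredentials) carries the temporary access
	// key ID, secret access key, session token, and expiration used to sign
	// Zonal endpoint requests.
	log.Printf("session expires at %v", aws.ToTime(out.Credentials.Expiration))
}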
+// +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters +// [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html +// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) { if params == nil { params = &CreateSessionInput{} @@ -107,13 +117,14 @@ type CreateSessionInput struct { } func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CreateSessionOutput struct { - // The established temporary security credentials for the created session.. + // The established temporary security credentials for the created session. // // This member is required. Credentials *types.SessionCredentials @@ -146,25 +157,25 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -182,6 +193,15 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpCreateSessionValidationMiddleware(stack); err != nil { return err } @@ -191,7 +211,7 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = 
addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addCreateSessionUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go index 34645bb9a2a2..e0654a0c18c4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go @@ -15,33 +15,43 @@ import ( // Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. +// // - Directory buckets - If multipart uploads in a directory bucket are in // progress, you can't delete the bucket until all the in-progress multipart // uploads are aborted or completed. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Regional endpoint. These endpoints support path-style // requests in the format // https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in +// the Amazon S3 User Guide. // // Permissions +// // - General purpose bucket permissions - You must have the s3:DeleteBucket // permission on the specified bucket in a policy. +// // - Directory bucket permissions - You must have the s3express:DeleteBucket // permission in an IAM identity-based policy instead of a bucket policy. // Cross-account access to this API operation isn't supported. This operation can // only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see Amazon -// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the +// Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . 
The following operations are related to -// DeleteBucket : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// The following operations are related to DeleteBucket : +// +// [CreateBucket] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { if params == nil { params = &DeleteBucketInput{} @@ -59,30 +69,36 @@ func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, op type DeleteBucketInput struct { - // Specifies the bucket being deleted. Directory buckets - When you use this - // operation with a directory bucket, you must use path-style requests in the - // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Specifies the bucket being deleted. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + // Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -116,25 +132,25 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -152,6 +168,15 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { return err } @@ -161,7 +186,7 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go index 55d8fc1c9e85..e4e0d3b72a4d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -13,20 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes an analytics -// configuration for the bucket (specified by the analytics configuration ID). To -// use this operation, you must have permissions to perform the +// This operation is not supported by directory buckets. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. 
For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 analytics feature, see Amazon S3 -// Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . The following operations are related to DeleteBucketAnalyticsConfiguration : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// The following operations are related to DeleteBucketAnalyticsConfiguration : +// +// [GetBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &DeleteBucketAnalyticsConfigurationInput{} @@ -63,6 +75,7 @@ type DeleteBucketAnalyticsConfigurationInput struct { } func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -96,25 +109,25 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = 
addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -132,6 +145,15 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -141,7 +163,7 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go index 9a544922e481..cbf731583543 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go @@ -13,14 +13,25 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the cors -// configuration information set for the bucket. To use this operation, you must -// have permission to perform the s3:PutBucketCORS action. The bucket owner has -// this permission by default and can grant this permission to others. For -// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. Related Resources -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// This operation is not supported by directory buckets. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. 
+// +// # Related Resources +// +// [PutBucketCors] +// +// [RESTOPTIONSobject] +// +// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { if params == nil { params = &DeleteBucketCorsInput{} @@ -52,6 +63,7 @@ type DeleteBucketCorsInput struct { } func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -85,25 +97,25 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -121,6 +133,15 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -130,7 +151,7 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go index 0ef6f93ec41a..fa37719c0b3d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go @@ -13,20 +13,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the DELETE action resets the default encryption for the bucket as server-side -// encryption with Amazon S3 managed keys (SSE-S3). 
For information about the -// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. To use this operation, you must have permissions to -// perform the s3:PutEncryptionConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. The following operations are related to -// DeleteBucketEncryption : -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// This operation is not supported by directory buckets. +// +// This implementation of the DELETE action resets the default encryption for the +// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). For +// information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 +// User Guide. +// +// To use this operation, you must have permissions to perform the +// s3:PutEncryptionConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide. 
+// +// The following operations are related to DeleteBucketEncryption : +// +// [PutBucketEncryption] +// +// [GetBucketEncryption] +// +// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { if params == nil { params = &DeleteBucketEncryptionInput{} @@ -59,6 +68,7 @@ type DeleteBucketEncryptionInput struct { } func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -92,25 +102,25 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -128,6 +138,15 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -137,7 +156,7 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go index eeaefc5f2994..2a64710c172c 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -13,25 +13,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported by directory buckets. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to DeleteBucketIntelligentTieringConfiguration include: -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
+// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// [GetBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &DeleteBucketIntelligentTieringConfigurationInput{} @@ -64,6 +77,7 @@ type DeleteBucketIntelligentTieringConfigurationInput struct { } func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -97,25 +111,25 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -133,6 +147,15 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -142,7 +165,7 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go index 28a93e0dcb05..8fa7d1e43858 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -13,18 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes an inventory -// configuration (identified by the inventory ID) from the bucket. To use this -// operation, you must have permissions to perform the s3:PutInventoryConfiguration -// action. The bucket owner has this permission by default. The bucket owner can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// . Operations related to DeleteBucketInventoryConfiguration include: -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// This operation is not supported by directory buckets. +// +// Deletes an inventory configuration (identified by the inventory ID) from the +// bucket. +// +// To use this operation, you must have permissions to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. 
+// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// [GetBucketInventoryConfiguration] +// +// [PutBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { if params == nil { params = &DeleteBucketInventoryConfigurationInput{} @@ -61,6 +75,7 @@ type DeleteBucketInventoryConfigurationInput struct { } func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -94,25 +109,25 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -130,6 +145,15 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -139,7 +163,7 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go index d7b8eb581783..4c843ee5ff85 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go @@ -13,20 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the lifecycle -// configuration from the specified bucket. Amazon S3 removes all the lifecycle -// configuration rules in the lifecycle subresource associated with the bucket. -// Your objects never expire, and Amazon S3 no longer automatically deletes any -// objects on the basis of rules contained in the deleted lifecycle configuration. +// This operation is not supported by directory buckets. +// +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the deleted +// lifecycle configuration. +// // To use this operation, you must have permission to perform the // s3:PutLifecycleConfiguration action. By default, the bucket owner has this -// permission and the bucket owner can grant this permission to others. There is -// usually some time lag before lifecycle configuration deletion is fully -// propagated to all the Amazon S3 systems. For more information about the object -// expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions) -// . Related actions include: -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// permission and the bucket owner can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is fully +// propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see [Elements to Describe Lifecycle Actions]. 
+// +// Related actions include: +// +// [PutBucketLifecycleConfiguration] +// +// [GetBucketLifecycleConfiguration] +// +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { if params == nil { params = &DeleteBucketLifecycleInput{} @@ -58,6 +70,7 @@ type DeleteBucketLifecycleInput struct { } func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -91,25 +104,25 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -127,6 +140,15 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { return err } @@ -136,7 +158,7 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go index a6675a8801f5..645b5712b75f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -13,22 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. 
Deletes a metrics -// configuration for the Amazon CloudWatch request metrics (specified by the -// metrics configuration ID) from the bucket. Note that this doesn't include the -// daily storage metrics. To use this operation, you must have permissions to -// perform the s3:PutMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to DeleteBucketMetricsConfiguration : -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported by directory buckets. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the +// s3:PutMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. 
+// +// The following operations are related to DeleteBucketMetricsConfiguration : +// +// [GetBucketMetricsConfiguration] +// +// [PutBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] +// +// [Monitoring Metrics with Amazon CloudWatch] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { if params == nil { params = &DeleteBucketMetricsConfigurationInput{} @@ -66,6 +79,7 @@ type DeleteBucketMetricsConfigurationInput struct { } func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -99,25 +113,25 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -135,6 +149,15 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -144,7 +167,7 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err 
= addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go index 1b53e08e9bf8..893b0f1d55ed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -13,14 +13,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes OwnershipControls -// for an Amazon S3 bucket. To use this operation, you must have the -// s3:PutBucketOwnershipControls permission. For more information about Amazon S3 -// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html) -// . The following operations are related to DeleteBucketOwnershipControls : -// - GetBucketOwnershipControls -// - PutBucketOwnershipControls +// This operation is not supported by directory buckets. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. +// +// The following operations are related to DeleteBucketOwnershipControls : +// +// # GetBucketOwnershipControls +// +// # PutBucketOwnershipControls +// +// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { if params == nil { params = &DeleteBucketOwnershipControlsInput{} @@ -52,6 +60,7 @@ type DeleteBucketOwnershipControlsInput struct { } func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -85,25 +94,25 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = 
addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -121,6 +130,15 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -130,7 +148,7 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go index 0d8e77ff734d..f9e53d54959c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go @@ -13,44 +13,57 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the policy of a specified bucket. Directory buckets - For directory -// buckets, you must make requests for this API operation to the Regional endpoint. -// These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the DeleteBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. If you don't have DeleteBucketPolicy permissions, Amazon S3 returns -// a 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Deletes the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in +// the Amazon S3 User Guide. 
+// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the DeleteBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not using +// an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:DeleteBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// # The following operations are related to DeleteBucketPolicy +// +// [CreateBucket] +// +// [DeleteObject] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . 
The following operations are related to -// DeleteBucketPolicy -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { if params == nil { params = &DeleteBucketPolicyInput{} @@ -68,30 +81,36 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPol type DeleteBucketPolicyInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use path-style requests in the format + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format // https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + // Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -125,25 +144,25 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -161,6 +180,15 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -170,7 +198,7 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go index 7ac11a7ba9af..c7e9803e5382 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go @@ -13,18 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the replication -// configuration from the bucket. To use this operation, you must have permissions -// to perform the s3:PutReplicationConfiguration action. The bucket owner has -// these permissions by default and can grant it to others. For more information -// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . It can take a while for the deletion of a replication configuration to fully -// propagate. 
For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. The following operations are related to -// DeleteBucketReplication : -// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// This operation is not supported by directory buckets. +// +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the +// s3:PutReplicationConfiguration action. The bucket owner has these permissions by +// default and can grant it to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see [Replication] in the Amazon S3 User +// Guide. +// +// The following operations are related to DeleteBucketReplication : +// +// [PutBucketReplication] +// +// [GetBucketReplication] +// +// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { if params == nil { params = &DeleteBucketReplicationInput{} @@ -42,7 +56,7 @@ func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBuck type DeleteBucketReplicationInput struct { - // The bucket name. + // The bucket name. // // This member is required. 
Bucket *string @@ -56,6 +70,7 @@ type DeleteBucketReplicationInput struct { } func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -89,25 +104,25 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -125,6 +140,15 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -134,7 +158,7 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go index 5a75a1f4ac47..3f511103b4f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go @@ -13,13 +13,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the tags from the -// bucket. To use this operation, you must have permission to perform the +// This operation is not supported by directory buckets. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the // s3:PutBucketTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. The following operations are related to -// DeleteBucketTagging : -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// can grant this permission to others. 
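A short illustrative helper for the operation documented above, assuming an *s3.Client built elsewhere (for example via s3.NewFromConfig); the function name is hypothetical.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteBucketTags removes the bucket's entire tag set. The caller needs the
// s3:PutBucketTagging permission noted in the comment above.
func deleteBucketTags(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketTagging(ctx, &s3.DeleteBucketTaggingInput{
		Bucket: aws.String(bucket),
	})
	return err
}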
+// +// The following operations are related to DeleteBucketTagging : +// +// [GetBucketTagging] +// +// [PutBucketTagging] +// +// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html +// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { if params == nil { params = &DeleteBucketTaggingInput{} @@ -51,6 +60,7 @@ type DeleteBucketTaggingInput struct { } func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -84,25 +94,25 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -120,6 +130,15 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -129,7 +148,7 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go index dbe84fbb9fe8..58ae1d017ae3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go @@ -13,20 +13,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This action removes the -// website configuration for a bucket. Amazon S3 returns a 200 OK response upon -// successfully deleting a website configuration on the specified bucket. 
You will -// get a 200 OK response if the website configuration you are trying to delete -// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket -// specified in the request does not exist. This DELETE action requires the -// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete -// the website configuration attached to a bucket. However, bucket owners can grant -// other users permission to delete the website configuration by writing a bucket -// policy granting them the S3:DeleteBucketWebsite permission. For more -// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . The following operations are related to DeleteBucketWebsite : -// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// This operation is not supported by directory buckets. +// +// This action removes the website configuration for a bucket. Amazon S3 returns a +// 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 +// response if the bucket specified in the request does not exist. +// +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a bucket. +// However, bucket owners can grant other users permission to delete the website +// configuration by writing a bucket policy granting them the +// S3:DeleteBucketWebsite permission. +// +// For more information about hosting websites, see [Hosting Websites on Amazon S3]. 
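Likewise, a hedged sketch of removing a bucket's website configuration with an existing *s3.Client; the helper name is illustrative.

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteWebsiteConfig issues DeleteBucketWebsite. Per the documentation above,
// a nil error (200 OK) does not guarantee a website configuration existed.
func deleteWebsiteConfig(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucketWebsite(ctx, &s3.DeleteBucketWebsiteInput{
		Bucket: aws.String(bucket),
	})
	return err
}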
+// +// The following operations are related to DeleteBucketWebsite : +// +// [GetBucketWebsite] +// +// [PutBucketWebsite] +// +// [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html +// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { if params == nil { params = &DeleteBucketWebsiteInput{} @@ -58,6 +69,7 @@ type DeleteBucketWebsiteInput struct { } func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -91,25 +103,25 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -127,6 +139,15 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -136,7 +157,7 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go index cd00e5998af6..9af132c8c64c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -16,13 +16,21 @@ import ( // Removes an object from a bucket. The behavior depends on the bucket's // versioning state: // -// - If versioning is enabled, the operation removes the null version (if there -// is one) of an object and inserts a delete marker, which becomes the latest -// version of the object. 
If there isn't a null version, Amazon S3 does not remove -// any objects but will still respond that the command was successful. +// - If bucket versioning is not enabled, the operation permanently deletes the +// object. // -// - If versioning is suspended or not enabled, the operation permanently -// deletes the object. +// - If bucket versioning is enabled, the operation inserts a delete marker, +// which becomes the current version of the object. To permanently delete an object +// in a versioned bucket, you must include the object’s versionId in the request. +// For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket]. +// +// - If bucket versioning is suspended, the operation removes the object that +// has a null versionId , if there is one, and inserts a delete marker that +// becomes the current version of the object. If there isn't an object with a null +// versionId , and all versions of the object have a versionId , Amazon S3 does +// not remove the object and only inserts a delete marker. To permanently delete an +// object that has a versionId , you must include the object’s versionId in the +// request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets]. // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is @@ -33,36 +41,43 @@ import ( // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the +// Amazon S3 User Guide. // // To remove a specific version, you must use the versionId query parameter. Using // this query parameter permanently deletes the version. If the object deleted is a // delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. +// // If the object you want to delete is in a bucket where the bucket versioning // configuration is MFA Delete enabled, you must include the x-amz-mfa request // header in the DELETE versionId request. Requests that include x-amz-mfa must -// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) -// in the Amazon S3 User Guide. To see sample requests that use versioning, see -// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete) -// . Directory buckets - MFA delete is not supported by directory buckets. You can -// delete objects by explicitly calling DELETE Object or calling ( -// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// ) to enable Amazon S3 to remove them for you. If you want to block users or -// accounts from removing or deleting objects from your bucket, you must deny them -// the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration -// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. +// use HTTPS. 
For more information about MFA Delete, see [Using MFA Delete]in the Amazon S3 User +// Guide. To see sample requests that use versioning, see [Sample Request]. +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle] ) to +// enable Amazon S3 to remove them for you. If you want to block users or accounts +// from removing or deleting objects from your bucket, you must deny them the +// s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration +// actions. +// +// Directory buckets - S3 Lifecycle is not supported by directory buckets. +// // Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your DeleteObjects request includes specific headers. +// // - s3:DeleteObject - To delete an object from a bucket, you must always have // the s3:DeleteObject permission. +// // - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versiong-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -70,13 +85,23 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following action is related to DeleteObject : // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following action is -// related to DeleteObject : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// [PutObject] +// +// [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html +// [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { if params == nil { params = &DeleteObjectInput{} @@ -94,31 +119,39 @@ func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, op type DeleteObjectInput struct { - // The bucket name of the bucket containing the object. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format + // The bucket name of the bucket containing the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. 
+ // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -130,8 +163,9 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to // process this operation. To use this header, you must have the - // s3:BypassGovernanceRetention permission. This functionality is not supported for - // directory buckets. + // s3:BypassGovernanceRetention permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool // The account ID of the expected bucket owner. If the account ID that you provide @@ -142,28 +176,34 @@ type DeleteObjectInput struct { // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. This functionality is not supported for directory buckets. + // delete enabled. + // + // This functionality is not supported for directory buckets. MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. 
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. For directory - // buckets in this API operation, only the null value of the version ID is - // supported. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. VersionId *string noSmithyDocumentSerde } func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -174,16 +214,21 @@ type DeleteObjectOutput struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. This functionality is not supported for directory - // buckets. + // object is a delete marker. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Returns the version ID of the delete marker created as a result of the DELETE - // operation. This functionality is not supported for directory buckets. + // operation. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. 
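The request and response fields above can be exercised with a sketch like the following; it assumes a pre-built *s3.Client, and the helper name, logging, and placeholder arguments are illustrative only.

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteObjectVersion permanently deletes one object version. Omitting VersionId
// on a versioning-enabled bucket would instead create a delete marker.
func deleteObjectVersion(ctx context.Context, client *s3.Client, bucket, key, versionID string) error {
	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
	})
	if err != nil {
		return err
	}
	// DeleteMarker and VersionId mirror the x-amz-delete-marker and
	// x-amz-version-id response headers; both are unset for directory buckets.
	log.Printf("delete marker: %v, version: %s", aws.ToBool(out.DeleteMarker), aws.ToString(out.VersionId))
	return nil
}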
@@ -214,25 +259,25 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -250,6 +295,15 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { return err } @@ -259,7 +313,7 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go index 961b9459a4a2..473479057b02 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go @@ -12,16 +12,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes the entire tag -// set from the specified object. For more information about managing object tags, -// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . To use this operation, you must have permission to perform the -// s3:DeleteObjectTagging action. To delete tags of a specific object version, add -// the versionId query parameter in the request. You will need permission for the -// s3:DeleteObjectVersionTagging action. The following operations are related to -// DeleteObjectTagging : -// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// This operation is not supported by directory buckets. +// +// Removes the entire tag set from the specified object. For more information +// about managing object tags, see [Object Tagging]. 
+// +// To use this operation, you must have permission to perform the +// s3:DeleteObjectTagging action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteObjectTagging : +// +// [PutObjectTagging] +// +// [GetObjectTagging] +// +// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { if params == nil { params = &DeleteObjectTaggingInput{} @@ -39,23 +50,27 @@ func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTa type DeleteObjectTaggingInput struct { - // The bucket name containing the objects from which to remove the tags. Access - // points - When you use this action with an access point, you must provide the - // alias of the access point in place of the bucket name or specify the access + // The bucket name containing the objects from which to remove the tags. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -77,6 +92,7 @@ type DeleteObjectTaggingInput struct { } func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -114,25 +130,25 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -150,6 +166,15 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -159,7 +184,7 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go index 1d1fa4327e92..fff3d896d50b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -17,47 +17,57 @@ import ( // This operation enables you to delete multiple objects from a bucket using a // single HTTP request. If you know the object keys that you want to delete, then // this operation provides a suitable alternative to sending individual delete -// requests, reducing per-request overhead. The request can contain a list of up to -// 1000 keys that you want to delete. In the XML, you provide the object key names, -// and optionally, version IDs if you want to delete a specific version of the -// object from a versioning-enabled bucket. For each key, Amazon S3 performs a -// delete operation and returns the result of that delete, success or failure, in -// the response. Note that if the object specified in the request is not found, -// Amazon S3 returns the result as deleted. +// requests, reducing per-request overhead. +// +// The request can contain a list of up to 1000 keys that you want to delete. 
In +// the XML, you provide the object key names, and optionally, version IDs if you +// want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success or failure, in the response. Note that if the +// object specified in the request is not found, Amazon S3 returns the result as +// deleted. +// // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the +// Amazon S3 User Guide. // // The operation supports two modes for the response: verbose and quiet. By // default, the operation uses verbose mode in which the response includes the // result of deletion of each key in your request. In quiet mode the response // includes only keys where the delete operation encountered an error. For a // successful deletion in a quiet mode, the operation does not return any -// information about the delete in the response body. When performing this action -// on an MFA Delete enabled bucket, that attempts to delete any versioned objects, -// you must include an MFA token. If you do not provide one, the entire request -// will fail, even if there are non-versioned objects you are trying to delete. If -// you provide an invalid token, whether there are versioned keys in the request or -// not, the entire Multi-Object Delete request will fail. For information about MFA -// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) -// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by -// directory buckets. Permissions +// information about the delete in the response body. +// +// When performing this action on an MFA Delete enabled bucket, that attempts to +// delete any versioned objects, you must include an MFA token. If you do not +// provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether there +// are versioned keys in the request or not, the entire Multi-Object Delete request +// will fail. For information about MFA Delete, see [MFA Delete]in the Amazon S3 User Guide. +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your DeleteObjects request includes specific headers. +// // - s3:DeleteObject - To delete an object from a bucket, you must always specify // the s3:DeleteObject permission. +// // - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versiong-enabled bucket, you must specify the s3:DeleteObjectVersion +// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion // permission. 
+// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -65,26 +75,42 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Content-MD5 request header +// // - General purpose bucket - The Content-MD5 request header is required for all // Multi-Object Delete requests. Amazon S3 uses the header value to ensure that // your request body has not been altered in transit. +// // - Directory bucket - The Content-MD5 request header or a additional checksum // request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all // Multi-Object Delete requests. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to DeleteObjects : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
+// +// The following operations are related to DeleteObjects : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [AbortMultipartUpload] +// +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { if params == nil { params = &DeleteObjectsInput{} @@ -102,31 +128,39 @@ func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, type DeleteObjectsInput struct { - // The bucket name containing the objects to delete. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // The bucket name containing the objects to delete. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. 
Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -138,28 +172,40 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a // Governance-type Object Lock in place. To use this header, you must have the - // s3:BypassGovernanceRetention permission. This functionality is not supported for - // directory buckets. + // s3:BypassGovernanceRetention permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool // Indicates the algorithm used to create the checksum for the object when you use // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . + // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. 
If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . If you provide an individual - // checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm + // parameter and uses the checksum algorithm that matches the provided value in + // x-amz-checksum-algorithm . + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide @@ -170,31 +216,38 @@ type DeleteObjectsInput struct { // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. When performing the DeleteObjects operation on an MFA delete - // enabled bucket, which attempts to delete the specified versioned objects, you - // must include an MFA token. If you don't provide an MFA token, the entire request - // will fail, even if there are non-versioned objects that you are trying to - // delete. If you provide an invalid token, whether there are versioned object keys - // in the request or not, the entire Multi-Object Delete request will fail. For - // information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // delete enabled. + // + // When performing the DeleteObjects operation on an MFA delete enabled bucket, + // which attempts to delete the specified versioned objects, you must include an + // MFA token. If you don't provide an MFA token, the entire request will fail, even + // if there are non-versioned objects that you are trying to delete. If you provide + // an invalid token, whether there are versioned object keys in the request or not, + // the entire Multi-Object Delete request will fail. For information about MFA + // Delete, see [MFA Delete]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. 
For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -210,7 +263,9 @@ type DeleteObjectsOutput struct { Errors []types.Error // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -241,25 +296,25 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -277,6 +332,15 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { return err } @@ -286,7 +350,7 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go index 488d2a797fc3..dee7f1f19465 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go @@ -13,17 +13,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketPublicAccessBlock permission. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to DeletePublicAccessBlock : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// This operation is not supported by directory buckets. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For +// more information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// The following operations are related to DeletePublicAccessBlock : +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock] +// +// [PutPublicAccessBlock] +// +// [GetBucketPolicyStatus] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { if params == nil { params = &DeletePublicAccessBlockInput{} @@ -55,6 +66,7 @@ type DeletePublicAccessBlockInput struct { } func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -88,25 +100,25 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = 
addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -124,6 +136,15 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -133,7 +154,7 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go index 9a90a88a0d5d..c8e93d02857c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -14,26 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action uses the accelerate subresource to return the Transfer -// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3 -// Transfer Acceleration is a bucket-level feature that enables you to perform -// faster data transfers to and from Amazon S3. To use this operation, you must -// have permission to perform the s3:GetAccelerateConfiguration action. The bucket -// owner has this permission by default. The bucket owner can grant this permission -// to others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. You set the Transfer Acceleration state of an -// existing bucket to Enabled or Suspended by using the -// PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) -// operation. A GET accelerate request does not return a state value for a bucket -// that has no transfer acceleration state. 
A bucket has no Transfer Acceleration -// state if a state has never been set on the bucket. For more information about -// transfer acceleration, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAccelerateConfiguration : -// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// This operation is not supported by directory buckets. +// +// This implementation of the GET action uses the accelerate subresource to return +// the Transfer Acceleration state of a bucket, which is either Enabled or +// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the +// s3:GetAccelerateConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled or +// Suspended by using the [PutBucketAccelerateConfiguration] operation. +// +// A GET accelerate request does not return a state value for a bucket that has no +// transfer acceleration state. A bucket has no Transfer Acceleration state if a +// state has never been set on the bucket. +// +// For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User +// Guide. +// +// The following operations are related to GetBucketAccelerateConfiguration : +// +// [PutBucketAccelerateConfiguration] +// +// [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { if params == nil { params = &GetBucketAccelerateConfigurationInput{} @@ -65,16 +75,19 @@ type GetBucketAccelerateConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -82,7 +95,9 @@ func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointP type GetBucketAccelerateConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // The accelerate configuration of the bucket. @@ -116,25 +131,25 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -152,6 +167,15 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } @@ -161,7 +185,7 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go index 36747fc9cda8..73557d2eef22 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go @@ -14,26 +14,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action uses the acl subresource to return the access control list (ACL) -// of a bucket. 
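The GetBucketAccelerateConfiguration hunk above follows the same doc-link and middleware rewrite. A minimal caller, assuming a client built with config.LoadDefaultConfig and s3.NewFromConfig as in the earlier sketch, might look like this; the bucket name is a placeholder.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printAccelerateStatus reports the Transfer Acceleration state of a bucket.
// The status is empty if acceleration has never been configured on the bucket.
func printAccelerateStatus(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketAccelerateConfiguration(ctx, &s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	fmt.Println("accelerate status:", out.Status)
	return nil
}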
To use GET to return the ACL of the bucket, you must have the -// READ_ACP access to the bucket. If READ_ACP permission is granted to the -// anonymous user, you can return the ACL of the bucket without using an -// authorization header. When you use this API operation with an access point, -// provide the alias of the access point in place of the bucket name. When you use -// this API operation with an Object Lambda access point, provide the alias of the -// Object Lambda access point in place of the bucket name. If the Object Lambda -// access point alias in a request is not valid, the error code +// This operation is not supported by directory buckets. +// +// This implementation of the GET action uses the acl subresource to return the +// access control list (ACL) of a bucket. To use GET to return the ACL of the +// bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code // InvalidAccessPointAliasError is returned. For more information about -// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, // requests to read ACLs are still supported and return the // bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see Controlling object ownership and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAcl : -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAcl : +// +// [ListObjects] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { if params == nil { params = &GetBucketAclInput{} @@ -51,14 +60,18 @@ func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, op type GetBucketAclInput struct { - // Specifies the S3 bucket whose ACL is being requested. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. 
When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // Specifies the S3 bucket whose ACL is being requested. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -72,6 +85,7 @@ type GetBucketAclInput struct { } func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,25 +126,25 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -148,6 +162,15 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { return err } @@ -157,7 +180,7 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go index 0f7922ae9de2..d3b27f9ffc18 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -14,21 +14,33 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action returns an analytics configuration (identified by the analytics -// configuration ID) from the bucket. To use this operation, you must have -// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket -// owner has this permission by default. The bucket owner can grant this permission -// to others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, -// see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAnalyticsConfiguration : -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// This operation is not supported by directory buckets. +// +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the +// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User +// Guide. 
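For orientation, a small helper along these lines reads back one analytics configuration through the operation documented above; the bucket name and configuration ID are placeholders and the client is assumed to be constructed as in the earlier sketches.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// getAnalyticsConfig fetches a single analytics configuration by its ID.
func getAnalyticsConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketAnalyticsConfiguration(ctx, &s3.GetBucketAnalyticsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	if cfg := out.AnalyticsConfiguration; cfg != nil {
		fmt.Println("analytics configuration id:", aws.ToString(cfg.Id))
	}
	return nil
}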
+// +// The following operations are related to GetBucketAnalyticsConfiguration : +// +// [DeleteBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &GetBucketAnalyticsConfigurationInput{} @@ -65,6 +77,7 @@ type GetBucketAnalyticsConfigurationInput struct { } func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -102,25 +115,25 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -138,6 +151,15 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -147,7 +169,7 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go index 33c25aa131b6..ef33218e3783 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go @@ -14,21 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the Cross-Origin -// Resource Sharing (CORS) configuration information set for the bucket. To use -// this operation, you must have permission to perform the s3:GetBucketCORS +// This operation is not supported by directory buckets. +// +// Returns the Cross-Origin Resource Sharing (CORS) configuration information set +// for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS // action. By default, the bucket owner has this permission and can grant it to -// others. When you use this API operation with an access point, provide the alias -// of the access point in place of the bucket name. When you use this API operation -// with an Object Lambda access point, provide the alias of the Object Lambda -// access point in place of the bucket name. If the Object Lambda access point -// alias in a request is not valid, the error code InvalidAccessPointAliasError is -// returned. For more information about InvalidAccessPointAliasError , see List of -// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// . The following operations are related to GetBucketCors : -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// others. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// For more information about CORS, see [Enabling Cross-Origin Resource Sharing]. 
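A comparable sketch for the GetBucketCors operation above, again with a placeholder bucket name and a pre-built client:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listCORSRules prints the allowed origins of each CORS rule set on a bucket.
func listCORSRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	for _, rule := range out.CORSRules {
		fmt.Println("allowed origins:", rule.AllowedOrigins)
	}
	return nil
}

If the bucket has no CORS configuration, the call is expected to fail with a NoSuchCORSConfiguration error rather than return an empty rule list.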
+// +// The following operations are related to GetBucketCors : +// +// [PutBucketCors] +// +// [DeleteBucketCors] +// +// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { if params == nil { params = &GetBucketCorsInput{} @@ -46,14 +61,18 @@ func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, type GetBucketCorsInput struct { - // The bucket name for which to get the cors configuration. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The bucket name for which to get the cors configuration. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -67,6 +86,7 @@ type GetBucketCorsInput struct { } func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -105,25 +125,25 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -141,6 +161,15 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -150,7 +179,7 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go index c8be5dd0019d..8e768fe0d0d1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go @@ -14,20 +14,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the default -// encryption configuration for an Amazon S3 bucket. By default, all buckets have a -// default encryption configuration that uses server-side encryption with Amazon S3 -// managed keys (SSE-S3). For information about the bucket default encryption -// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. To use this operation, you must have permission to -// perform the s3:GetEncryptionConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. 
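The recurring change in these hunks swaps exported middleware helpers (awsmiddleware.AddClientRequestIDMiddleware, smithyhttp.AddComputeContentLengthMiddleware, v4.AddComputePayloadSHA256Middleware, and so on) for unexported per-operation wrappers such as addClientRequestID and addComputePayloadSHA256. Those wrappers stay internal to the generated package; application code that needs to adjust the operation stack still goes through Options.APIOptions, roughly as sketched below. The header name and middleware ID are placeholders.

package main

import (
	"context"

	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// withTraceHeader appends a Build-step middleware that stamps a custom header
// on every request sent through the returned option. "X-Trace-Id" is only an example.
func withTraceHeader(traceID string) func(*s3.Options) {
	return func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return stack.Build.Add(middleware.BuildMiddlewareFunc("TraceHeader",
				func(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
					middleware.BuildOutput, middleware.Metadata, error,
				) {
					if req, ok := in.Request.(*smithyhttp.Request); ok {
						req.Header.Set("X-Trace-Id", traceID)
					}
					return next.HandleBuild(ctx, in)
				}), middleware.After)
		})
	}
}

Passing the returned option to s3.NewFromConfig applies the header to every operation of that client; passing it as a per-call option applies it to a single request.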
For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to GetBucketEncryption : -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// This operation is not supported by directory buckets. +// +// Returns the default encryption configuration for an Amazon S3 bucket. By +// default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). For information +// about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. +// +// To use this operation, you must have permission to perform the +// s3:GetEncryptionConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// The following operations are related to GetBucketEncryption : +// +// [PutBucketEncryption] +// +// [DeleteBucketEncryption] +// +// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { if params == nil { params = &GetBucketEncryptionInput{} @@ -60,6 +69,7 @@ type GetBucketEncryptionInput struct { } func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -97,25 +107,25 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); 
err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -133,6 +143,15 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -142,7 +161,7 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go index a3531f98394b..def1765a6bf0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -14,25 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported by directory buckets. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . 
Operations related to GetBucketIntelligentTieringConfiguration include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &GetBucketIntelligentTieringConfigurationInput{} @@ -65,6 +78,7 @@ type GetBucketIntelligentTieringConfigurationInput struct { } func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -102,25 +116,25 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if 
err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -138,6 +152,15 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -147,7 +170,7 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go index 3fe6f9868c1a..8e09aec2c355 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -14,18 +14,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns an inventory -// configuration (identified by the inventory configuration ID) from the bucket. To -// use this operation, you must have permissions to perform the +// This operation is not supported by directory buckets. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// . The following operations are related to GetBucketInventoryConfiguration : -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. 
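A hypothetical reader for the inventory configuration documented above, with placeholder bucket and ID values and a client built as in the earlier sketches:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// getInventoryConfig fetches one inventory configuration by its ID.
func getInventoryConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketInventoryConfiguration(ctx, &s3.GetBucketInventoryConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	if cfg := out.InventoryConfiguration; cfg != nil {
		fmt.Println("inventory configuration id:", aws.ToString(cfg.Id))
	}
	return nil
}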
+// +// The following operations are related to GetBucketInventoryConfiguration : +// +// [DeleteBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [PutBucketInventoryConfiguration] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { if params == nil { params = &GetBucketInventoryConfigurationInput{} @@ -62,6 +76,7 @@ type GetBucketInventoryConfigurationInput struct { } func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -99,25 +114,25 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -135,6 +150,15 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -144,7 +168,7 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, 
options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go index 4cc9eff88485..4d3e018d08d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -14,31 +14,51 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Bucket lifecycle -// configuration now supports specifying a lifecycle rule using an object key name -// prefix, one or more object tags, or a combination of both. Accordingly, this -// section describes the latest API. The response describes the new filter element -// that you can use to specify a filter to select a subset of objects to which the -// rule applies. If you are using a previous version of the lifecycle -// configuration, it still works. For the earlier action, see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// . Returns the lifecycle configuration information set on the bucket. For -// information about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// . To use this operation, you must have permission to perform the +// This operation is not supported by directory buckets. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any +// combination of these. Accordingly, this section describes the latest API. The +// previous version of the API supported filtering based only on an object key name +// prefix, which is supported for backward compatibility. For the earlier action, +// see [GetBucketLifecycle]. The response describes the new filter element that you +// can use to specify a filter to select a subset of objects to which the rule +// applies. If you are using a previous version of the lifecycle configuration, it +// still works. +// +// Returns the lifecycle configuration information set on the bucket. For +// information about lifecycle configuration, see [Object Lifecycle Management]. +// +// To use this operation, you must have permission to perform the // s3:GetLifecycleConfiguration action. The bucket owner has this permission, by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . GetBucketLifecycleConfiguration has the following special error: +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// GetBucketLifecycleConfiguration has the following special error: +// // - Error code: NoSuchLifecycleConfiguration +// // - Description: The lifecycle configuration does not exist.
+// // - HTTP Status Code: 404 Not Found +// // - SOAP Fault Code Prefix: Client // // The following operations are related to GetBucketLifecycleConfiguration : -// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// [GetBucketLifecycle] +// +// [PutBucketLifecycle] +// +// [DeleteBucketLifecycle] +// +// [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { if params == nil { params = &GetBucketLifecycleConfigurationInput{} @@ -70,6 +90,7 @@ type GetBucketLifecycleConfigurationInput struct { } func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -107,25 +128,25 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -143,6 +164,15 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -152,7 +182,7 @@ func (c *Client) 
addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go index e94875fe5e71..6ff8e95779a4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go @@ -20,23 +20,34 @@ import ( "io" ) -// This operation is not supported by directory buckets. Returns the Region the -// bucket resides in. You set the bucket's Region using the LocationConstraint -// request parameter in a CreateBucket request. For more information, see -// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. When you use this API operation with -// an Object Lambda access point, provide the alias of the Object Lambda access -// point in place of the bucket name. If the Object Lambda access point alias in a -// request is not valid, the error code InvalidAccessPointAliasError is returned. -// For more information about InvalidAccessPointAliasError , see List of Error -// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) -// to return the Region that a bucket resides in. For backward compatibility, -// Amazon S3 continues to support GetBucketLocation. The following operations are -// related to GetBucketLocation : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// This operation is not supported by directory buckets. +// +// Returns the Region the bucket resides in. You set the bucket's Region using the +// LocationConstraint request parameter in a CreateBucket request. For more +// information, see [CreateBucket]. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For +// backward compatibility, Amazon S3 continues to support GetBucketLocation. 
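Following the guidance in the reworked GetBucketLocation comment above (the operation is kept for backward compatibility; HeadBucket is the recommended way to discover a bucket's Region), here is a small illustrative helper. It is a sketch only, not part of this patch: it assumes the imports and client construction shown in the earlier GetBucketInventoryConfiguration example, and the bucket name is supplied by the caller.

// bucketRegion resolves a bucket's Region, preferring HeadBucket and falling
// back to GetBucketLocation, where an empty LocationConstraint means us-east-1.
func bucketRegion(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	// Preferred path: HeadBucket reports the Region in BucketRegion.
	head, err := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String(bucket)})
	if err == nil && aws.ToString(head.BucketRegion) != "" {
		return aws.ToString(head.BucketRegion), nil
	}

	// Backward-compatible path: GetBucketLocation.
	loc, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return "", err
	}
	if loc.LocationConstraint == "" {
		// A null/empty LocationConstraint denotes us-east-1.
		return "us-east-1", nil
	}
	return string(loc.LocationConstraint), nil
}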
+// +// The following operations are related to GetBucketLocation : +// +// [GetObject] +// +// [CreateBucket] +// +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { if params == nil { params = &GetBucketLocationInput{} @@ -54,14 +65,18 @@ func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocatio type GetBucketLocationInput struct { - // The name of the bucket for which to get the location. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The name of the bucket for which to get the location. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -75,6 +90,7 @@ type GetBucketLocationInput struct { } func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -82,8 +98,10 @@ func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { type GetBucketLocationOutput struct { // Specifies the Region where the bucket resides. For a list of all the Amazon S3 - // supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // . Buckets in Region us-east-1 have a LocationConstraint of null . + // supported location constraints by Region, see [Regions and Endpoints]. Buckets in Region us-east-1 + // have a LocationConstraint of null . + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region LocationConstraint types.BucketLocationConstraint // Metadata pertaining to the operation's result. 
@@ -114,25 +132,25 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -153,6 +171,15 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { return err } @@ -162,7 +189,7 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketLocationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go index 5bbca7e4cb64..fec538ee64ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go @@ -14,11 +14,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the logging -// status of a bucket and the permissions users have to view and modify that -// status. The following operations are related to GetBucketLogging : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// This operation is not supported by directory buckets. +// +// Returns the logging status of a bucket and the permissions users have to view +// and modify that status. 
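To make the GetBucketLogging description above concrete, here is one more hypothetical snippet in the same style as the earlier sketches (same imports and client setup assumed). LoggingEnabled is nil when server access logging is not configured, so the caller must check it before dereferencing.

// printBucketLogging reports whether server access logging is enabled and,
// if so, where the log objects are delivered.
func printBucketLogging(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLogging(ctx, &s3.GetBucketLoggingInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	if out.LoggingEnabled == nil {
		fmt.Println("server access logging is disabled")
		return nil
	}
	fmt.Printf("logs delivered to bucket %s with prefix %q\n",
		aws.ToString(out.LoggingEnabled.TargetBucket),
		aws.ToString(out.LoggingEnabled.TargetPrefix))
	return nil
}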
+// +// The following operations are related to GetBucketLogging : +// +// [CreateBucket] +// +// [PutBucketLogging] +// +// [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { if params == nil { params = &GetBucketLoggingInput{} @@ -50,6 +58,7 @@ type GetBucketLoggingInput struct { } func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -57,8 +66,10 @@ func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { type GetBucketLoggingOutput struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. + // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + // Reference. + // + // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html LoggingEnabled *types.LoggingEnabled // Metadata pertaining to the operation's result. @@ -89,25 +100,25 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -125,6 +136,15 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -134,7 +154,7 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go index 913b9a04d290..13cf42f403c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -14,21 +14,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets a metrics -// configuration (specified by the metrics configuration ID) from the bucket. Note -// that this doesn't include the daily storage metrics. To use this operation, you -// must have permissions to perform the s3:GetMetricsConfiguration action. The -// bucket owner has this permission by default. The bucket owner can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to GetBucketMetricsConfiguration : -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported by directory buckets. +// +// Gets a metrics configuration (specified by the metrics configuration ID) from +// the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. 
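As with the other operations touched by this hunk, a short caller-side sketch may help orient readers; it assumes the shared setup from the first example, and the metrics configuration ID is a placeholder chosen when the configuration was originally created with PutBucketMetricsConfiguration.

// printMetricsConfiguration fetches a CloudWatch request-metrics configuration
// by its ID. Both Bucket and Id are required by the operation.
func printMetricsConfiguration(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketMetricsConfiguration(ctx, &s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	fmt.Println("metrics configuration:", aws.ToString(out.MetricsConfiguration.Id))
	return nil
}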
+// +// The following operations are related to GetBucketMetricsConfiguration : +// +// [PutBucketMetricsConfiguration] +// +// [DeleteBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] +// +// [Monitoring Metrics with Amazon CloudWatch] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { if params == nil { params = &GetBucketMetricsConfigurationInput{} @@ -66,6 +79,7 @@ type GetBucketMetricsConfigurationInput struct { } func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -103,25 +117,25 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -139,6 +153,15 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -148,7 +171,7 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go index 67a35d973ae7..66915f732fc3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -14,24 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the notification -// configuration of a bucket. If notifications are not enabled on the bucket, the -// action returns an empty NotificationConfiguration element. By default, you must -// be the bucket owner to read the notification configuration of a bucket. However, -// the bucket owner can use a bucket policy to grant permission to other users to -// read this configuration with the s3:GetBucketNotification permission. When you -// use this API operation with an access point, provide the alias of the access -// point in place of the bucket name. When you use this API operation with an -// Object Lambda access point, provide the alias of the Object Lambda access point -// in place of the bucket name. If the Object Lambda access point alias in a -// request is not valid, the error code InvalidAccessPointAliasError is returned. -// For more information about InvalidAccessPointAliasError , see List of Error -// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about setting and reading the notification configuration -// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . The following action is related to GetBucketNotification : -// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// This operation is not supported by directory buckets. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant +// permission to other users to read this configuration with the +// s3:GetBucketNotification permission. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// For more information about setting and reading the notification configuration +// on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies]. 
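The notification documentation above is likewise prose-only, so here is a final hedged sketch (same assumed setup as the earlier examples) showing how a caller could enumerate the SNS, SQS, and Lambda targets returned by GetBucketNotificationConfiguration; an empty result simply means no notifications are configured.

// printNotificationTargets lists the notification destinations configured on a bucket.
func printNotificationTargets(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketNotificationConfiguration(ctx, &s3.GetBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, t := range out.TopicConfigurations {
		fmt.Println("SNS topic:", aws.ToString(t.TopicArn))
	}
	for _, q := range out.QueueConfigurations {
		fmt.Println("SQS queue:", aws.ToString(q.QueueArn))
	}
	for _, l := range out.LambdaFunctionConfigurations {
		fmt.Println("Lambda function:", aws.ToString(l.LambdaFunctionArn))
	}
	return nil
}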
+// +// The following action is related to GetBucketNotification : +// +// [PutBucketNotification] +// +// [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { if params == nil { params = &GetBucketNotificationConfigurationInput{} @@ -49,15 +63,18 @@ func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params type GetBucketNotificationConfigurationInput struct { - // The name of the bucket for which to get the notification configuration. When - // you use this API operation with an access point, provide the alias of the access - // point in place of the bucket name. When you use this API operation with an - // Object Lambda access point, provide the alias of the Object Lambda access point - // in place of the bucket name. If the Object Lambda access point alias in a - // request is not valid, the error code InvalidAccessPointAliasError is returned. - // For more information about InvalidAccessPointAliasError , see List of Error - // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The name of the bucket for which to get the notification configuration. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -71,6 +88,7 @@ type GetBucketNotificationConfigurationInput struct { } func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -122,25 +140,25 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -158,6 +176,15 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } @@ -167,7 +194,7 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go index dca55854e961..f8d2486b555a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go @@ -14,14 +14,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves -// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -// the s3:GetBucketOwnershipControls permission. For more information about Amazon -// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// . The following operations are related to GetBucketOwnershipControls : -// - PutBucketOwnershipControls -// - DeleteBucketOwnershipControls +// This operation is not supported by directory buckets. 
+// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see [Specifying permissions in a policy]. +// +// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. +// +// The following operations are related to GetBucketOwnershipControls : +// +// # PutBucketOwnershipControls +// +// # DeleteBucketOwnershipControls +// +// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { if params == nil { params = &GetBucketOwnershipControlsInput{} @@ -53,6 +61,7 @@ type GetBucketOwnershipControlsInput struct { } func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -91,25 +100,25 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -127,6 +136,15 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -136,7 +154,7 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go index ff42b705beff..1a2f25193af1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go @@ -13,47 +13,61 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the policy of a specified bucket. Directory buckets - For directory -// buckets, you must make requests for this API operation to the Regional endpoint. -// These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the GetBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. If you don't have GetBucketPolicy permissions, Amazon S3 returns a -// 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Returns the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in +// the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the GetBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:GetBucketPolicy permission is // required in a policy. 
For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:GetBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. // -// Example bucket policies General purpose buckets example bucket policies - See -// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// in the Amazon S3 User Guide. Directory bucket example bucket policies - See -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . The -// following action is related to GetBucketPolicy : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// The following action is related to GetBucketPolicy : +// +// [GetObject] +// +// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { if params == nil { params = &GetBucketPolicyInput{} @@ -71,39 +85,49 @@ func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInp type GetBucketPolicyInput struct { - // The bucket name to get the bucket policy for. 
Directory buckets - When you use - // this operation with a directory bucket, you must use path-style requests in the - // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // The bucket name to get the bucket policy for. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide Access points - When you use this API operation with - // an access point, provide the alias of the access point in place of the bucket - // name. Object Lambda access points - When you use this API operation with an - // Object Lambda access point, provide the alias of the Object Lambda access point - // in place of the bucket name. If the Object Lambda access point alias in a - // request is not valid, the error code InvalidAccessPointAliasError is returned. - // For more information about InvalidAccessPointAliasError , see List of Error - // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . Access points and Object Lambda access points are not supported by directory + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + // Guide + // + // Access points - When you use this API operation with an access point, provide + // the alias of the access point in place of the bucket name. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point in + // place of the bucket name. If the Object Lambda access point alias in a request + // is not valid, the error code InvalidAccessPointAliasError is returned. For more + // information about InvalidAccessPointAliasError , see [List of Error Codes]. + // + // Access points and Object Lambda access points are not supported by directory // buckets. // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -141,25 +165,25 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -177,6 +201,15 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -186,7 +219,7 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go index 6acf706fc17a..57cee1fb3fc5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go @@ -14,18 +14,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves the policy -// status for an Amazon S3 bucket, indicating whether the bucket is public. In -// order to use this operation, you must have the s3:GetBucketPolicyStatus -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . For more information about when Amazon S3 considers a bucket public, see The -// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . 
The following operations are related to GetBucketPolicyStatus : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// This operation is not supported by directory buckets. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the +// s3:GetBucketPolicyStatus permission. For more information about Amazon S3 +// permissions, see [Specifying Permissions in a Policy]. +// +// For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"]. +// +// The following operations are related to GetBucketPolicyStatus : +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock] +// +// [PutPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { if params == nil { params = &GetBucketPolicyStatusInput{} @@ -57,6 +70,7 @@ type GetBucketPolicyStatusInput struct { } func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -94,25 +108,25 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. 
if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -130,6 +144,15 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { return err } @@ -139,7 +162,7 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go index 8db927c1327e..10a0f6a1091e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go @@ -14,21 +14,37 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the replication -// configuration of a bucket. It can take a while to propagate the put or delete a -// replication configuration to all Amazon S3 systems. Therefore, a get request -// soon after put or delete can return a wrong result. For information about -// replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. This action requires permissions for the -// s3:GetReplicationConfiguration action. For more information about permissions, -// see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . If you include the Filter element in a replication configuration, you must -// also include the DeleteMarkerReplication and Priority elements. The response -// also returns those elements. For information about GetBucketReplication errors, -// see List of replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// This operation is not supported by directory buckets. 
+// +// Returns the replication configuration of a bucket. +// +// It can take a while to propagate the put or delete a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after put or delete can +// return a wrong result. +// +// For information about replication configuration, see [Replication] in the Amazon S3 User +// Guide. +// +// This action requires permissions for the s3:GetReplicationConfiguration action. +// For more information about permissions, see [Using Bucket Policies and User Policies]. +// +// If you include the Filter element in a replication configuration, you must also +// include the DeleteMarkerReplication and Priority elements. The response also +// returns those elements. +// +// For information about GetBucketReplication errors, see [List of replication-related error codes] +// // The following operations are related to GetBucketReplication : -// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// [PutBucketReplication] +// +// [DeleteBucketReplication] +// +// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList +// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { if params == nil { params = &GetBucketReplicationInput{} @@ -60,6 +76,7 @@ type GetBucketReplicationInput struct { } func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -98,25 +115,25 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -134,6 +151,15 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); 
err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -143,7 +169,7 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go index 37c964509d33..2a4c05842812 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go @@ -14,11 +14,17 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the request -// payment configuration of a bucket. To use this version of the operation, you -// must be the bucket owner. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) -// . The following operations are related to GetBucketRequestPayment : -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// This operation is not supported by directory buckets. +// +// Returns the request payment configuration of a bucket. To use this version of +// the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets]. 
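The hunks above repeat the pattern seen throughout this patch: the generated operation files swap the exported awsmiddleware/smithyhttp/v4 helpers for package-local ones (addClientRequestID, addComputeContentLength, addComputePayloadSHA256, addRetry, addRawResponseToMetadata, addRecordResponseTiming, addRecursionDetection), add addTimeOffsetBuild, addUserAgentRetryMode and addIsExpressUserAgent to the stack, and bind the bucket name in bindEndpointParams. All of that is wiring inside the generated client; as a hedged caller-side sketch (bucket name and config loading are placeholders, not anything this patch mandates), the operation is still invoked the same way:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Load region and credentials from the default sources.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// "example-bucket" is a placeholder; the middleware changes in this patch
	// are assembled inside the generated operation and are invisible here.
	out, err := client.GetBucketRequestPayment(ctx, &s3.GetBucketRequestPaymentInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("payer:", out.Payer)
}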
+// +// The following operations are related to GetBucketRequestPayment : +// +// [ListObjects] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { if params == nil { params = &GetBucketRequestPaymentInput{} @@ -50,6 +56,7 @@ type GetBucketRequestPaymentInput struct { } func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -87,25 +94,25 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -123,6 +130,15 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -132,7 +148,7 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go index 4c2761be88d9..61c62de03ae1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go @@ -14,17 +14,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the tag set -// associated with the bucket. To use this operation, you must have permission to -// perform the s3:GetBucketTagging action. By default, the bucket owner has this -// permission and can grant this permission to others. 
GetBucketTagging has the -// following special error: +// This operation is not supported by directory buckets. +// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the +// s3:GetBucketTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. +// +// GetBucketTagging has the following special error: +// // - Error code: NoSuchTagSet +// // - Description: There is no tag set associated with the bucket. // // The following operations are related to GetBucketTagging : -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// [PutBucketTagging] +// +// [DeleteBucketTagging] +// +// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html +// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { if params == nil { params = &GetBucketTaggingInput{} @@ -56,6 +67,7 @@ type GetBucketTaggingInput struct { } func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -95,25 +107,25 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -131,6 +143,15 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -140,7 +161,7 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil { diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go index 55cad6297dc5..86fcc67bb87d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go @@ -14,15 +14,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the versioning -// state of a bucket. To retrieve the versioning state of a bucket, you must be the -// bucket owner. This implementation also returns the MFA Delete status of the -// versioning state. If the MFA Delete status is enabled , the bucket owner must -// use an authentication device to change the versioning state of the bucket. The -// following operations are related to GetBucketVersioning : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// This operation is not supported by directory buckets. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning state. +// If the MFA Delete status is enabled , the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning : +// +// [GetObject] +// +// [PutObject] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { if params == nil { params = &GetBucketVersioningInput{} @@ -54,6 +66,7 @@ type GetBucketVersioningInput struct { } func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -96,25 +109,25 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); 
err != nil { @@ -132,6 +145,15 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -141,7 +163,7 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go index f0ebf2b0d281..72254092aeae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go @@ -14,17 +14,26 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the website -// configuration for a bucket. To host website on Amazon S3, you can configure a -// bucket as website by adding a website configuration. For more information about -// hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . This GET action requires the S3:GetBucketWebsite permission. By default, only +// This operation is not supported by directory buckets. +// +// Returns the website configuration for a bucket. To host website on Amazon S3, +// you can configure a bucket as website by adding a website configuration. For +// more information about hosting websites, see [Hosting Websites on Amazon S3]. +// +// This GET action requires the S3:GetBucketWebsite permission. By default, only // the bucket owner can read the bucket website configuration. However, bucket // owners can allow other users to read the website configuration by writing a -// bucket policy granting them the S3:GetBucketWebsite permission. The following -// operations are related to GetBucketWebsite : -// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// bucket policy granting them the S3:GetBucketWebsite permission. 
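GetBucketVersioning and GetBucketWebsite above get the same internal treatment. As an illustration only (client and ctx as in the sketch further up, bucket name hypothetical), reading the website configuration that the doc comment describes:

// client and ctx as in the earlier sketch; bucket name is a placeholder.
out, err := client.GetBucketWebsite(ctx, &s3.GetBucketWebsiteInput{
	Bucket: aws.String("example-bucket"),
})
if err != nil {
	log.Fatal(err)
}
if out.IndexDocument != nil {
	// Suffix is the index document (for example "index.html") configured on the bucket.
	fmt.Println("index document:", aws.ToString(out.IndexDocument.Suffix))
}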
+// +// The following operations are related to GetBucketWebsite : +// +// [DeleteBucketWebsite] +// +// [PutBucketWebsite] +// +// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html +// [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { if params == nil { params = &GetBucketWebsiteInput{} @@ -56,6 +65,7 @@ type GetBucketWebsiteInput struct { } func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -103,25 +113,25 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -139,6 +149,15 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -148,7 +167,7 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go index d2dc15c7cd7d..088c48c7ca9d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -16,100 +16,141 @@ import ( "time" ) -// Retrieves an object from Amazon S3. In the GetObject request, specify the full -// key name for the object. General purpose buckets - Both the virtual-hosted-style -// requests and the path-style requests are supported. 
For a virtual hosted-style -// request example, if you have the object photos/2006/February/sample.jpg , -// specify the object key name as /photos/2006/February/sample.jpg . For a -// path-style request example, if you have the object -// photos/2006/February/sample.jpg in the bucket named examplebucket , specify the -// object key name as /examplebucket/photos/2006/February/sample.jpg . For more -// information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) -// in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style -// requests are supported. For a virtual hosted-style request example, if you have -// the object photos/2006/February/sample.jpg in the bucket named +// Retrieves an object from Amazon S3. +// +// In the GetObject request, specify the full key name for the object. +// +// General purpose buckets - Both the virtual-hosted-style requests and the +// path-style requests are supported. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg , specify the object key +// name as /photos/2006/February/sample.jpg . For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named +// examplebucket , specify the object key name as +// /examplebucket/photos/2006/February/sample.jpg . For more information about +// request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide. +// +// Directory buckets - Only virtual-hosted-style requests are supported. For a +// virtual hosted-style request example, if you have the object +// photos/2006/February/sample.jpg in the bucket named // examplebucket--use1-az5--x-s3 , specify the object key name as // /photos/2006/February/sample.jpg . Also, when you make requests to this API // operation, your requests are sent to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions // - General purpose bucket permissions - You must have the required permissions // in a policy. To use GetObject , you must have the READ access to the object // (or version). If you grant READ access to the anonymous user, the GetObject // operation returns the object without using an authorization header. For more -// information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If you include a versionId in your request -// header, you must have the s3:GetObjectVersion permission to access a specific -// version of an object. The s3:GetObject permission is not required in this -// scenario. If you request the current version of an object without a specific -// versionId in the request header, only the s3:GetObject permission is required. -// The s3:GetObjectVersion permission is not required in this scenario. 
If the -// object that you request doesn’t exist, the error that Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Access Denied error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// information, see [Specifying permissions in a policy]in the Amazon S3 User Guide. +// +// If you include a versionId in your request header, you must have the +// +// s3:GetObjectVersion permission to access a specific version of an object. The +// s3:GetObject permission is not required in this scenario. +// +// If you request the current version of an object without a specific versionId in +// +// the request header, only the s3:GetObject permission is required. The +// s3:GetObjectVersion permission is not required in this scenario. +// +// If the object that you request doesn’t exist, the error that Amazon S3 returns +// +// depends on whether you also have the s3:ListBucket permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Access Denied error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . 
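With the permissions notes above in mind, a minimal GetObject sketch against a general purpose bucket; the bucket and key are the documentation's own example names, and the surrounding setup is assumed from the earlier sketch rather than defined by this patch:

// client and ctx as in the earlier sketch; extra import: "io".
out, err := client.GetObject(ctx, &s3.GetObjectInput{
	Bucket: aws.String("examplebucket"),                   // placeholder
	Key:    aws.String("photos/2006/February/sample.jpg"), // the object key
})
if err != nil {
	log.Fatal(err)
}
defer out.Body.Close()

// Body is an io.ReadCloser streaming the object's bytes.
n, err := io.Copy(io.Discard, out.Body)
if err != nil {
	log.Fatal(err)
}
fmt.Printf("read %d bytes, ETag %s\n", n, aws.ToString(out.ETag))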
// // Storage classes If the object you are retrieving is stored in the S3 Glacier // Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the // S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep // Archive Access tier, before you can retrieve the object you must first restore a -// copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the -// S3 Express One Zone storage class is supported to store newly created objects. -// Unsupported storage class values won't write a destination object and will -// respond with the HTTP status code 400 Bad Request . Encryption Encryption -// request headers, like x-amz-server-side-encryption , should not be sent for the -// GetObject requests, if your object uses server-side encryption with Amazon S3 -// managed encryption keys (SSE-S3), server-side encryption with Key Management -// Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon -// Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject -// requests for the object that uses these types of keys, you’ll get an HTTP 400 -// Bad Request error. Overriding response header values through the request There -// are times when you want to override certain response header values of a -// GetObject response. For example, you might override the Content-Disposition -// response header value through your GetObject request. You can override values -// for a set of response headers. These modified response header values are -// included only in a successful response, that is, when the HTTP status code 200 -// OK is returned. The headers you can override using the following query -// parameters in the request are a subset of the headers that Amazon S3 accepts -// when you create an object. The response headers that you can override for the -// GetObject response are Cache-Control , Content-Disposition , Content-Encoding , -// Content-Language , Content-Type , and Expires . To override values for a set of -// response headers in the GetObject response, you can use the following query -// parameters in the request. +// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, only the S3 Express One Zone storage +// class is supported to store newly created objects. Unsupported storage class +// values won't write a destination object and will respond with the HTTP status +// code 400 Bad Request . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for the GetObject requests, if your object uses server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3), server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you +// include the header in your GetObject requests for the object that uses these +// types of keys, you’ll get an HTTP 400 Bad Request error. 
+// +// Overriding response header values through the request There are times when you +// want to override certain response header values of a GetObject response. For +// example, you might override the Content-Disposition response header value +// through your GetObject request. +// +// You can override values for a set of response headers. These modified response +// header values are included only in a successful response, that is, when the HTTP +// status code 200 OK is returned. The headers you can override using the +// following query parameters in the request are a subset of the headers that +// Amazon S3 accepts when you create an object. +// +// The response headers that you can override for the GetObject response are +// Cache-Control , Content-Disposition , Content-Encoding , Content-Language , +// Content-Type , and Expires . +// +// To override values for a set of response headers in the GetObject response, you +// can use the following query parameters in the request. +// // - response-cache-control +// // - response-content-disposition +// // - response-content-encoding +// // - response-content-language +// // - response-content-type +// // - response-expires // // When you use these parameters, you must sign the request by using either an // Authorization header or a presigned URL. These parameters cannot be used with an -// unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// unsigned (anonymous) request. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// // The following operations are related to GetObject : -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// [ListBuckets] +// +// [GetObjectAcl] +// +// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { if params == nil { params = &GetObjectInput{} @@ -127,35 +168,44 @@ func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns . type GetObjectInput struct { - // The bucket name containing the object. Directory buckets - When you use this - // operation with a directory bucket, you must use virtual-hosted-style requests in - // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style - // requests are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. 
Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Object Lambda access points - When you use this - // action with an Object Lambda access point, you must direct requests to the - // Object Lambda access point hostname. The Object Lambda access point hostname - // takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // The bucket name containing the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this action with an Object Lambda + // access point, you must direct requests to the Object Lambda access point + // hostname. The Object Lambda access point hostname takes the form + // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // // Access points and Object Lambda access points are not supported by directory - // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -174,37 +224,55 @@ type GetObjectInput struct { ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified in this header; otherwise, return a 412 Precondition Failed error. If - // both of the If-Match and If-Unmodified-Since headers are present in the request - // as follows: If-Match condition evaluates to true , and; If-Unmodified-Since - // condition evaluates to false ; then, S3 returns 200 OK and the data requested. - // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) - // . + // specified in this header; otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true , and; + // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + // the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 Not Modified error. If both of the If-None-Match and - // If-Modified-Since headers are present in the request as follows: If-None-Match - // condition evaluates to false , and; If-Modified-Since condition evaluates to - // true ; then, S3 returns 304 Not Modified status code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + // Modified status code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified in this header; otherwise, return a 304 Not Modified error. If both - // of the If-None-Match and If-Modified-Since headers are present in the request - // as follows: If-None-Match condition evaluates to false , and; If-Modified-Since - // condition evaluates to true ; then, S3 returns 304 Not Modified HTTP status - // code. For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) - // . + // specified in this header; otherwise, return a 304 Not Modified error. 
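The If-Match / If-None-Match / If-Modified-Since / If-Unmodified-Since fields documented above map directly onto the RFC 7232 preconditions. Below is a cache-revalidation sketch; fetchIfChanged, the bucket, key, and date are made up for illustration, and it assumes (not guaranteed by this patch) that a 304 surfaces as an operation error wrapping the HTTP response:

// Assumes imports: "context", "errors", "net/http", "time",
// "github.com/aws/aws-sdk-go-v2/aws",
// awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http",
// "github.com/aws/aws-sdk-go-v2/service/s3".
func fetchIfChanged(ctx context.Context, client *s3.Client, etag string) error {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:          aws.String("examplebucket"), // placeholder
		Key:             aws.String("photos/2006/February/sample.jpg"),
		IfNoneMatch:     aws.String(etag), // the ETag seen on a previous GET
		IfModifiedSince: aws.Time(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)),
	})
	if err != nil {
		var respErr *awshttp.ResponseError
		if errors.As(err, &respErr) && respErr.HTTPStatusCode() == http.StatusNotModified {
			return nil // 304 Not Modified: the cached copy is still current
		}
		return err
	}
	defer out.Body.Close()
	// ... consume out.Body and remember aws.ToString(out.ETag) for next time.
	return nil
}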
+ // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + // Modified HTTP status code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 Precondition Failed error. If both of the If-Match and - // If-Unmodified-Since headers are present in the request as follows: If-Match - // condition evaluates to true , and; If-Unmodified-Since condition evaluates to - // false ; then, S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true , and; + // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + // the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -213,18 +281,23 @@ type GetObjectInput struct { PartNumber *int32 // Downloads the specified byte range of an object. For more information about the - // HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range) - // . Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]. + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range Range *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Sets the Cache-Control header of the response. @@ -245,72 +318,97 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time - // Specifies the algorithm to use when decrypting the object (for example, AES256 - // ). 
If you encrypt an object by using server-side encryption with - // customer-provided encryption keys (SSE-C) when you store the object in Amazon - // S3, then when you GET the object, you must use the following headers: + // Specifies the algorithm to use when decrypting the object (for example, AES256 ). + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // Specifies the customer-provided encryption key that you originally provided for // Amazon S3 to encrypt the data before storing it. This value is used to decrypt // the object when recovering it and must match the one used when storing the data. // The key must be appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object - // by using server-side encryption with customer-provided encryption keys (SSE-C) - // when you store the object in Amazon S3, then when you GET the object, you must - // use the following headers: + // x-amz-server-side-encryption-customer-algorithm header. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // Specifies the 128-bit MD5 digest of the customer-provided encryption key // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. 
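A sketch of supplying the three SSE-C headers described above. The key below is zero-filled purely for illustration; in practice it must be the same 256-bit key that encrypted the object, and this assumes the caller passes the base64-encoded key and its MD5 (the raw REST contract) rather than relying on any SDK-side encoding:

// client and ctx as above; extra imports: "crypto/md5", "encoding/base64".
key := make([]byte, 32) // stand-in for the 256-bit customer key that encrypted the object
sum := md5.Sum(key)     // 128-bit digest of the raw key, per the SSECustomerKeyMD5 doc above

out, err := client.GetObject(ctx, &s3.GetObjectInput{
	Bucket:               aws.String("examplebucket"), // placeholder
	Key:                  aws.String("secret.bin"),    // placeholder
	SSECustomerAlgorithm: aws.String("AES256"),
	SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(key)),
	SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
})
if err != nil {
	log.Fatal(err)
}
defer out.Body.Close()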
If you encrypt - // an object by using server-side encryption with customer-provided encryption keys - // (SSE-C) when you store the object in Amazon S3, then when you GET the object, - // you must use the following headers: + // to ensure that the encryption key was transmitted without error. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string - // Version ID used to reference a specific version of the object. By default, the - // GetObject operation returns the current version of an object. To return a - // different version, use the versionId subresource. + // Version ID used to reference a specific version of the object. + // + // By default, the GetObject operation returns the current version of an object. + // To return a different version, use the versionId subresource. + // // - If you include a versionId in your request header, you must have the // s3:GetObjectVersion permission to access a specific version of an object. The // s3:GetObject permission is not required in this scenario. + // // - If you request the current version of an object without a specific versionId // in the request header, only the s3:GetObject permission is required. The // s3:GetObjectVersion permission is not required in this scenario. + // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is // supported by directory buckets. You can only specify null to the versionId // query parameter in the request. - // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) - // . + // + // For more information about versioning, see [PutBucketVersioning]. + // + // [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html VersionId *string noSmithyDocumentSerde } func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -325,35 +423,40 @@ type GetObjectOutput struct { Body io.ReadCloser // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not - // supported for directory buckets. + // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. 
CacheControl *string // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // present if it was uploaded with the object. For more information, see [Checking object integrity]in the + // Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // present if it was uploaded with the object. For more information, see [Checking object integrity]in the + // Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // present if it was uploaded with the object. For more information, see [Checking object integrity]in the + // Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // present if it was uploaded with the object. For more information, see [Checking object integrity]in the + // Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Specifies presentational information for the object. @@ -378,9 +481,11 @@ type GetObjectOutput struct { // Indicates whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. + // // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the // response. + // // - If the specified version in the request is a delete marker, the response // returns a 405 Method Not Allowed error and the Last-Modified: timestamp // response header. @@ -390,20 +495,33 @@ type GetObjectOutput struct { // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ), the response includes this header. It includes the expiry-date and rule-id + // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), + // the response includes this header. 
It includes the expiry-date and rule-id // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. This functionality is not supported for directory - // buckets. + // rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // The date and time at which the object is no longer cacheable. + // + // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + // the ExpiresString field which contains the unparsed value from the service + // response. Expires *time.Time - // Date and time when the object was last modified. General purpose buckets - When - // you specify a versionId of the object in your request, if the specified version - // in the request is a delete marker, the response returns a 405 Method Not Allowed - // error and the Last-Modified: timestamp response header. + // The unparsed value of the Expires field from the service response. Prefer use + // of this value over the normal Expires response field where possible. + ExpiresString *string + + // Date and time when the object was last modified. + // + // General purpose buckets - When you specify a versionId of the object in your + // request, if the specified version in the request is a delete marker, the + // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. LastModified *time.Time // A map of metadata to store with the object in S3. @@ -415,20 +533,25 @@ type GetObjectOutput struct { // are prefixed with x-amz-meta- . This can happen if you create metadata using an // API like SOAP that supports more flexible metadata than the REST API. For // example, using SOAP, you can create metadata whose values are not legal HTTP - // headers. This functionality is not supported for directory buckets. + // headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int32 // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. This - // functionality is not supported for directory buckets. + // returned if you have permission to view an object's legal hold status. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that's currently in place for this object. This - // functionality is not supported for directory buckets. + // The Object Lock mode that's currently in place for this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // The date and time when this object's Object Lock will expire. This - // functionality is not supported for directory buckets. + // The date and time when this object's Object Lock will expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify @@ -436,63 +559,78 @@ type GetObjectOutput struct { PartsCount *int32 // Amazon S3 can return this if your request involves a bucket that is either a - // source or destination in a replication rule. This functionality is not supported - // for directory buckets. + // source or destination in a replication rule. 
+ // + // This functionality is not supported for directory buckets. ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides information about object restoration action and expiration time of the - // restored object copy. This functionality is not supported for directory buckets. - // Only the S3 Express One Zone storage class is supported by directory buckets to - // store objects. + // restored object copy. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. Restore *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. Directory buckets - // - Only the S3 Express One Zone storage class is supported by directory buckets - // to store objects. + // for all objects except for S3 Standard storage class objects. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass types.StorageClass // The number of tags, if any, on the object, when you have the relevant - // permission to read object tags. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) - // to retrieve the tag set associated with an object. This functionality is not - // supported for directory buckets. + // permission to read object tags. + // + // You can use [GetObjectTagging] to retrieve the tag set associated with an object. 
+ // + // This functionality is not supported for directory buckets. + // + // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html TagCount *int32 - // Version ID of the object. This functionality is not supported for directory - // buckets. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -523,25 +661,25 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -556,6 +694,15 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetObjectValidationMiddleware(stack); err != nil { return err } @@ -565,7 +712,7 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go index 3b2a16872ac1..debdaab0e31d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go @@ -13,24 +13,39 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the access -// control list (ACL) of an object. To use this operation, you must have -// s3:GetObjectAcl permissions or READ_ACP access to the object. 
For more -// information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) -// in the Amazon S3 User Guide This functionality is not supported for Amazon S3 on -// Outposts. By default, GET returns ACL information about the current version of -// an object. To return ACL information about a different version, use the -// versionId subresource. If your bucket uses the bucket owner enforced setting for -// S3 Object Ownership, requests to read ACLs are still supported and return the +// This operation is not supported by directory buckets. +// +// Returns the access control list (ACL) of an object. To use this operation, you +// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For +// more information, see [Mapping of ACL permissions and access policy permissions]in the Amazon S3 User Guide +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId +// subresource. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the // bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see Controlling object ownership and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetObjectAcl : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. +// +// The following operations are related to GetObjectAcl : +// +// [GetObject] +// +// [GetObjectAttributes] +// +// [DeleteObject] +// +// [PutObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { if params == nil { params = &GetObjectAclInput{} @@ -49,6 +64,7 @@ func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, op type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. 
+ // // Access points - When you use this action with an access point, you must provide // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the @@ -56,8 +72,9 @@ type GetObjectAclInput struct { // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -76,20 +93,24 @@ type GetObjectAclInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. This - // functionality is not supported for directory buckets. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -100,11 +121,13 @@ type GetObjectAclOutput struct { // A list of grants. Grants []types.Grant - // Container for the bucket owner's display name and ID. + // Container for the bucket owner's display name and ID. Owner *types.Owner // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
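For illustration, a minimal sketch of calling GetObject through the updated client, requesting a specific object version and reading the new ExpiresString field in preference to the deprecated Expires field. The bucket, key, and version ID values are placeholders, and a default credential chain is assumed; this is not part of the vendored patch.

// Illustrative sketch only; bucket, key, and version ID are placeholders.
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Requesting a specific version requires the s3:GetObjectVersion permission.
	out, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
		Bucket:    aws.String("example-bucket"),     // placeholder
		Key:       aws.String("example-key"),        // placeholder
		VersionId: aws.String("example-version-id"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	// Prefer the unparsed ExpiresString over the deprecated Expires field.
	if out.ExpiresString != nil {
		fmt.Println("expires:", *out.ExpiresString)
	}

	// Drain the body; a real caller would read or stream it somewhere useful.
	if _, err := io.Copy(io.Discard, out.Body); err != nil {
		log.Fatal(err)
	}
}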
@@ -135,25 +158,25 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -171,6 +194,15 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { return err } @@ -180,7 +212,7 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go index ed53ae7b31e9..98ebb0d7be56 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -16,32 +16,39 @@ import ( // Retrieves all the metadata from an object without returning the object itself. // This operation is useful if you're interested only in an object's metadata. +// // GetObjectAttributes combines the functionality of HeadObject and ListParts . All // of the data returned with each of those individual calls can be returned with a -// single call to GetObjectAttributes . Directory buckets - For directory buckets, -// you must make requests for this API operation to the Zonal endpoint. These -// endpoints support virtual-hosted-style requests in the format +// single call to GetObjectAttributes . +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. 
For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions +// // - General purpose bucket permissions - To use GetObjectAttributes , you must // have READ access to the object. The permissions that you need to use this // operation with depend on whether the bucket is versioned. If the bucket is // versioned, you need both the s3:GetObjectVersion and // s3:GetObjectVersionAttributes permissions for this operation. If the bucket is // not versioned, you need the s3:GetObject and s3:GetObjectAttributes -// permissions. For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If the object that you request does not exist, the -// error Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. +// permissions. For more information, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. If the +// object that you request does not exist, the error Amazon S3 returns depends on +// whether you also have the s3:ListBucket permission. +// // - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an // HTTP status code 404 Not Found ("no such key") error. +// // - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP // status code 403 Forbidden ("access denied") error. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -49,8 +56,7 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Encryption Encryption request headers, like x-amz-server-side-encryption , // should not be sent for HEAD requests if your object uses server-side encryption @@ -61,49 +67,86 @@ import ( // want to specify the encryption method. If you include this header in a GET // request for an object that uses these types of keys, you’ll get an HTTP 400 Bad // Request error. It's because the encryption method can't be changed when you -// retrieve the object. If you encrypt an object by using server-side encryption -// with customer-provided encryption keys (SSE-C) when you store the object in -// Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers to provide the encryption key for the server to be able to -// retrieve the object's metadata. The headers are: +// retrieve the object. 
+// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when you +// retrieve the metadata from the object, you must use the following headers to +// provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// // - x-amz-server-side-encryption-customer-algorithm +// // - x-amz-server-side-encryption-customer-key +// // - x-amz-server-side-encryption-customer-key-MD5 // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. Directory bucket permissions - For directory -// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( -// AES256 ) is supported. Versioning Directory buckets - S3 Versioning isn't -// enabled and supported for directory buckets. For this API operation, only the -// null value of the version ID is supported by directory buckets. You can only -// specify null to the versionId query parameter in the request. Conditional -// request headers Consider the following when using request headers: +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. +// +// Directory bucket permissions - For directory buckets, only server-side +// encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// +// Versioning Directory buckets - S3 Versioning isn't enabled and supported for +// directory buckets. For this API operation, only the null value of the version +// ID is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// Conditional request headers Consider the following when using request headers: +// // - If both of the If-Match and If-Unmodified-Since headers are present in the // request as follows, then Amazon S3 returns the HTTP status code 200 OK and the // data requested: +// // - If-Match condition evaluates to true . -// - If-Unmodified-Since condition evaluates to false . For more information -// about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) -// . +// +// - If-Unmodified-Since condition evaluates to false . +// +// For more information about conditional requests, see [RFC 7232]. +// // - If both of the If-None-Match and If-Modified-Since headers are present in // the request as follows, then Amazon S3 returns the HTTP status code 304 Not // Modified : +// // - If-None-Match condition evaluates to false . -// - If-Modified-Since condition evaluates to true . For more information about -// conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following actions are -// related to GetObjectAttributes : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) -// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) -// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) -// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - If-Modified-Since condition evaluates to true . +// +// For more information about conditional requests, see [RFC 7232]. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following actions are related to GetObjectAttributes : +// +// [GetObject] +// +// [GetObjectAcl] +// +// [GetObjectLegalHold] +// +// [GetObjectLockConfiguration] +// +// [GetObjectRetention] +// +// [GetObjectTagging] +// +// [HeadObject] +// +// [ListParts] +// +// [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [RFC 7232]: https://tools.ietf.org/html/rfc7232 +// [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html +// [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { if params == nil { params = &GetObjectAttributesInput{} @@ -121,31 +164,39 @@ func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttri type GetObjectAttributesInput struct { - // The name of the bucket that contains the object. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). 
For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
+ // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -177,32 +228,38 @@ type GetObjectAttributesInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // The version ID used to reference a specific version of the object. S3 - // Versioning isn't enabled and supported for directory buckets. For this API + // The version ID used to reference a specific version of the object. + // + // S3 Versioning isn't enabled and supported for directory buckets. For this API // operation, only the null value of the version ID is supported by directory // buckets. You can only specify null to the versionId query parameter in the // request. @@ -212,6 +269,7 @@ type GetObjectAttributesInput struct { } func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -223,6 +281,7 @@ type GetObjectAttributesOutput struct { // Specifies whether the object retrieved was ( true ) or was not ( false ) a // delete marker. If false , this response header does not appear in the response. 
+ // // This functionality is not supported for directory buckets. DeleteMarker *bool @@ -240,18 +299,25 @@ type GetObjectAttributesOutput struct { ObjectSize *int64 // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides the storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by // directory buckets to store objects. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // The version ID of the object. This functionality is not supported for directory - // buckets. + // The version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -282,25 +348,25 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -318,6 +384,15 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { return err } @@ -327,7 +402,7 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil { diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go index 3f7af220230e..0c39051e5a75 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -13,11 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets an object's current -// legal hold status. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. The following -// action is related to GetObjectLegalHold : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported by directory buckets. +// +// Gets an object's current legal hold status. For more information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectLegalHold : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { if params == nil { params = &GetObjectLegalHoldInput{} @@ -36,15 +43,18 @@ func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalH type GetObjectLegalHoldInput struct { // The bucket name containing the object whose legal hold status you want to - // retrieve. Access points - When you use this action with an access point, you - // must provide the alias of the access point in place of the bucket name or - // specify the access point ARN. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form + // retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -63,10 +73,12 @@ type GetObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. 
For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID of the object whose legal hold status you want to retrieve. @@ -76,6 +88,7 @@ type GetObjectLegalHoldInput struct { } func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -113,25 +126,25 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -149,6 +162,15 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -158,7 +180,7 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go index 1ba436d53ce8..28ea0fd12942 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go @@ -13,12 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets the Object Lock -// configuration for a bucket. 
The rule specified in the Object Lock configuration -// will be applied by default to every new object placed in the specified bucket. -// For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . The following action is related to GetObjectLockConfiguration : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported by directory buckets. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object placed +// in the specified bucket. For more information, see [Locking Objects]. +// +// The following action is related to GetObjectLockConfiguration : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { if params == nil { params = &GetObjectLockConfigurationInput{} @@ -36,16 +42,18 @@ func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObje type GetObjectLockConfigurationInput struct { - // The bucket whose Object Lock configuration you want to retrieve. Access points - // - When you use this action with an access point, you must provide the alias of - // the access point in place of the bucket name or specify the access point ARN. - // When using the access point ARN, you must direct requests to the access point - // hostname. The access point hostname takes the form + // The bucket whose Object Lock configuration you want to retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -59,6 +67,7 @@ type GetObjectLockConfigurationInput struct { } func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -96,25 +105,25 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -132,6 +141,15 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -141,7 +159,7 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go index 1dea7d8abd15..678afb91b28f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -13,11 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves an object's -// retention settings. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. The following -// action is related to GetObjectRetention : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported by directory buckets. +// +// Retrieves an object's retention settings. For more information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. 
+// +// The following action is related to GetObjectRetention : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { if params == nil { params = &GetObjectRetentionInput{} @@ -36,15 +43,18 @@ func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetent type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to - // retrieve. Access points - When you use this action with an access point, you - // must provide the alias of the access point in place of the bucket name or - // specify the access point ARN. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form + // retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -63,10 +73,12 @@ type GetObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID for the object whose retention settings you want to retrieve. 
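For illustration, a minimal sketch of reading an object's retention settings with GetObjectRetention. It assumes the target bucket has Object Lock enabled, uses placeholder bucket and key names, and relies on the default credential chain; it is not part of the vendored patch.

// Illustrative sketch only; bucket and key names are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Object Lock operations are not supported for directory buckets or S3 on Outposts.
	out, err := client.GetObjectRetention(context.TODO(), &s3.GetObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.Retention != nil {
		fmt.Println("retention mode:", out.Retention.Mode,
			"until:", aws.ToTime(out.Retention.RetainUntilDate))
	}
}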
@@ -76,6 +88,7 @@ type GetObjectRetentionInput struct { } func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -113,25 +126,25 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -149,6 +162,15 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -158,7 +180,7 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go index c020e9bd7513..bd70ccadd649 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -13,20 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the tag-set of an -// object. You send the GET request against the tagging subresource associated with -// the object. To use this operation, you must have permission to perform the +// This operation is not supported by directory buckets. +// +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the // s3:GetObjectTagging action. By default, the GET action returns information about // current version of an object. For a versioned bucket, you can have multiple // versions of an object in your bucket. To retrieve tags of any other version, use // the versionId query parameter. You also need permission for the -// s3:GetObjectVersionTagging action. 
By default, the bucket owner has this -// permission and can grant this permission to others. For information about the -// Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . The following actions are related to GetObjectTagging : -// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// s3:GetObjectVersionTagging action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see [Object Tagging]. +// +// The following actions are related to GetObjectTagging : +// +// [DeleteObjectTagging] +// +// [GetObjectAttributes] +// +// [PutObjectTagging] +// +// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { if params == nil { params = &GetObjectTaggingInput{} @@ -45,6 +60,7 @@ func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingI type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. + // // Access points - When you use this action with an access point, you must provide // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the @@ -52,15 +68,18 @@ type GetObjectTaggingInput struct { // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
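A small illustrative helper, with placeholder names, for reading an object's tag-set as described above; add VersionId to read tags of a non-current version:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listObjectTags prints each tag attached to the object.
func listObjectTags(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		return err
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
	return nil
}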
+ // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -79,10 +98,12 @@ type GetObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The versionId of the object for which to get the tagging information. @@ -92,6 +113,7 @@ type GetObjectTaggingInput struct { } func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -134,25 +156,25 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -170,6 +192,15 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -179,7 +210,7 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil { diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go index 6689ef976654..5be63e2e3323 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -14,14 +14,24 @@ import ( "io" ) -// This operation is not supported by directory buckets. Returns torrent files -// from a bucket. BitTorrent can save you bandwidth when you're distributing large -// files. You can get torrent only for objects that are less than 5 GB in size, and -// that are not encrypted using server-side encryption with a customer-provided -// encryption key. To use GET, you must have READ access to the object. This -// functionality is not supported for Amazon S3 on Outposts. The following action -// is related to GetObjectTorrent : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// This operation is not supported by directory buckets. +// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. +// +// You can get torrent only for objects that are less than 5 GB in size, and that +// are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectTorrent : +// +// [GetObject] +// +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { if params == nil { params = &GetObjectTorrentInput{} @@ -58,16 +68,19 @@ type GetObjectTorrentInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -78,7 +91,9 @@ type GetObjectTorrentOutput struct { Body io.ReadCloser // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
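An illustrative helper for the torrent retrieval described above; names and paths are placeholders, and the streamed Body must be closed by the caller:

package example

import (
	"context"
	"io"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// saveTorrent writes the torrent file for an object (under 5 GB and not SSE-C
// encrypted, per the constraints above) to the local disk.
func saveTorrent(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		return err
	}
	defer out.Body.Close() // the torrent payload is streamed

	f, err := os.Create("example-key.torrent") // placeholder path
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, out.Body)
	return err
}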
@@ -109,25 +124,25 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -142,6 +157,15 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { return err } @@ -151,7 +175,7 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go index 0ae12e397cd8..3eac86cb6c27 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go @@ -14,22 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:GetBucketPublicAccessBlock permission. For more -// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or -// an object, it checks the PublicAccessBlock configuration for both the bucket -// (or the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported by directory buckets. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For +// more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. 
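To illustrate the configuration being retrieved here, a sketch that assumes the pointer-typed fields of recent SDK releases; the bucket name is a placeholder, and the account-level settings discussed below still apply independently:

package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printPublicAccessBlock prints the bucket-level Block Public Access settings.
func printPublicAccessBlock(ctx context.Context, client *s3.Client) error {
	out, err := client.GetPublicAccessBlock(ctx, &s3.GetPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return err
	}
	cfg := out.PublicAccessBlockConfiguration
	if cfg == nil {
		return nil
	}
	fmt.Println("BlockPublicAcls:      ", aws.ToBool(cfg.BlockPublicAcls))
	fmt.Println("IgnorePublicAcls:     ", aws.ToBool(cfg.IgnorePublicAcls))
	fmt.Println("BlockPublicPolicy:    ", aws.ToBool(cfg.BlockPublicPolicy))
	fmt.Println("RestrictPublicBuckets:", aws.ToBool(cfg.RestrictPublicBuckets))
	return nil
}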
+// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock settings are different between the bucket and the account, // Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to GetPublicAccessBlock : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see [The Meaning of "Public"]. +// +// The following operations are related to GetPublicAccessBlock : +// +// [Using Amazon S3 Block Public Access] +// +// [PutPublicAccessBlock] +// +// [GetPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { if params == nil { params = &GetPublicAccessBlockInput{} @@ -62,6 +78,7 @@ type GetPublicAccessBlockInput struct { } func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -100,25 +117,25 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return 
err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -136,6 +153,15 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -145,7 +171,7 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go index 8a18ff851e70..e53d4f4d0410 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go @@ -19,42 +19,54 @@ import ( // You can use this operation to determine if a bucket exists and if you have // permission to access it. The action returns a 200 OK if the bucket exists and -// you have permission to access it. If the bucket does not exist or you do not -// have permission to access it, the HEAD request returns a generic 400 Bad Request -// , 403 Forbidden or 404 Not Found code. A message body is not included, so you -// cannot determine the exception beyond these error codes. Directory buckets - You -// must make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format +// you have permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, the +// HEAD request returns a generic 400 Bad Request , 403 Forbidden or 404 Not Found +// code. A message body is not included, so you cannot determine the exception +// beyond these HTTP response codes. +// +// Directory buckets - You must make requests for this API operation to the Zonal +// endpoint. These endpoints support virtual-hosted-style requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests -// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket -// requests must be authenticated and signed by using IAM credentials (access key -// ID and secret access key for the IAM identities). All headers with the x-amz- -// prefix, including x-amz-copy-source , must be signed. For more information, see -// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory bucket - You must use IAM credentials to authenticate and authorize +// are not supported. 
For more information, see [Regional and Zonal endpoints]in the Amazon S3 User Guide. +// +// Authentication and authorization All HeadBucket requests must be authenticated +// and signed by using IAM credentials (access key ID and secret access key for the +// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source +// , must be signed. For more information, see [REST Authentication]. +// +// Directory bucket - You must use IAM credentials to authenticate and authorize // your access to the HeadBucket API operation, instead of using the temporary -// security credentials through the CreateSession API operation. Amazon Web -// Services CLI or SDKs handles authentication and authorization on your behalf. +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// // Permissions +// // - General purpose bucket permissions - To use this operation, you must have // permissions to perform the s3:ListBucket action. The bucket owner has this // permission by default and can grant this permission to others. For more -// information about permissions, see Managing access permissions to your Amazon -// S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. +// information about permissions, see [Managing access permissions to your Amazon S3 resources]in the Amazon S3 User Guide. +// // - Directory bucket permissions - You must have the s3express:CreateSession // permission in the Action element of a policy. By default, the session is in // the ReadWrite mode. If you want to restrict the access, you can explicitly set -// the s3express:SessionMode condition key to ReadOnly on the bucket. For more -// information about example bucket policies, see Example bucket policies for S3 -// Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// the s3express:SessionMode condition key to ReadOnly on the bucket. +// +// For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is // Bucket_name.s3express-az_id.region.amazonaws.com . 
+// +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { if params == nil { params = &HeadBucketInput{} @@ -72,36 +84,46 @@ func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns type HeadBucketInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Object Lambda access points - When you use this API - // operation with an Object Lambda access point, provide the alias of the Object - // Lambda access point in place of the bucket name. If the Object Lambda access - // point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . Access points and Object Lambda access points are not supported by directory - // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. 
+ // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point in + // place of the bucket name. If the Object Lambda access point alias in a request + // is not valid, the error code InvalidAccessPointAliasError is returned. For more + // information about InvalidAccessPointAliasError , see [List of Error Codes]. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -115,6 +137,7 @@ type HeadBucketInput struct { } func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -122,21 +145,26 @@ func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { type HeadBucketOutput struct { // Indicates whether the bucket name used in the request is an access point alias. + // // This functionality is not supported for directory buckets. AccessPointAlias *bool - // The name of the location where the bucket will be created. For directory - // buckets, the AZ ID of the Availability Zone where the bucket is created. An - // example AZ ID value is usw2-az2 . This functionality is only supported by - // directory buckets. + // The name of the location where the bucket will be created. + // + // For directory buckets, the AZ ID of the Availability Zone where the bucket is + // created. An example AZ ID value is usw2-az1 . + // + // This functionality is only supported by directory buckets. BucketLocationName *string - // The type of location where the bucket is created. This functionality is only - // supported by directory buckets. 
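One reasonable way to act on the 200/400/403/404 behavior described above, treating only the modeled NotFound error as a definite "does not exist"; other failures leave existence undetermined:

package example

import (
	"context"
	"errors"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// bucketExists probes a bucket with HeadBucket. HEAD responses carry no body,
// so the outcome has to be inferred from the returned error.
func bucketExists(ctx context.Context, client *s3.Client, name string) (bool, error) {
	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String(name)})
	if err == nil {
		return true, nil
	}
	var notFound *types.NotFound
	if errors.As(err, &notFound) {
		return false, nil
	}
	return false, err // 400, 403, or a transport failure: existence is unknown
}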
+ // The type of location where the bucket is created. + // + // This functionality is only supported by directory buckets. BucketLocationType types.LocationType - // The Region that the bucket is located. This functionality is not supported for - // directory buckets. + // The Region that the bucket is located. + // + // This functionality is not supported for directory buckets. BucketRegion *string // Metadata pertaining to the operation's result. @@ -167,25 +195,25 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -203,6 +231,15 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpHeadBucketValidationMiddleware(stack); err != nil { return err } @@ -212,7 +249,7 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addHeadBucketUpdateEndpoint(stack, options); err != nil { @@ -239,28 +276,23 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti return nil } -func (v *HeadBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// HeadBucketAPIClient is a client that implements the HeadBucket operation. -type HeadBucketAPIClient interface { - HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) -} - -var _ HeadBucketAPIClient = (*Client)(nil) - // BucketExistsWaiterOptions are waiter options for BucketExistsWaiter type BucketExistsWaiterOptions struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. 
APIOptions []func(*middleware.Stack) error + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + // MinDelay is the minimum amount of time to delay between retries. If unset, // BucketExistsWaiter will use default minimum delay of 5 seconds. Note that // MinDelay must resolve to a value lesser than or equal to the MaxDelay. @@ -276,12 +308,13 @@ type BucketExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) } @@ -357,7 +390,16 @@ func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBuck } out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } }) retryable, err := options.Retryable(ctx, params, out, err) @@ -412,8 +454,17 @@ type BucketNotExistsWaiterOptions struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. APIOptions []func(*middleware.Stack) error + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + // MinDelay is the minimum amount of time to delay between retries. If unset, // BucketNotExistsWaiter will use default minimum delay of 5 seconds. Note that // MinDelay must resolve to a value lesser than or equal to the MaxDelay. @@ -429,12 +480,13 @@ type BucketNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. 
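The ClientOptions field introduced here can be exercised roughly as shown below; the delays and region override are placeholders, and, as the comments above state, these functional options run after the waiter config's own APIOptions modifiers:

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// waitForBucket blocks until the bucket is reachable or the deadline passes.
func waitForBucket(ctx context.Context, client *s3.Client, name string) error {
	waiter := s3.NewBucketExistsWaiter(client, func(o *s3.BucketExistsWaiterOptions) {
		o.MinDelay = 5 * time.Second
		o.MaxDelay = 30 * time.Second
		// ClientOptions applies to every HeadBucket call the waiter makes,
		// for example to pin a region for the probe requests.
		o.ClientOptions = append(o.ClientOptions, func(opts *s3.Options) {
			opts.Region = "us-east-1" // placeholder
		})
	})
	return waiter.Wait(ctx, &s3.HeadBucketInput{Bucket: aws.String(name)}, 2*time.Minute)
}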
By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) } @@ -511,7 +563,16 @@ func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadB } out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } }) retryable, err := options.Retryable(ctx, params, out, err) @@ -556,6 +617,20 @@ func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, return true, nil } +func (v *HeadBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadBucketAPIClient is a client that implements the HeadBucket operation. +type HeadBucketAPIClient interface { + HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) +} + +var _ HeadBucketAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go index 5f7b55e56add..064734cab2ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -19,43 +19,52 @@ import ( // The HEAD operation retrieves metadata from an object without returning the // object itself. This operation is useful if you're interested only in an object's -// metadata. A HEAD request has the same options as a GET operation on an object. -// The response is identical to the GET response except that there is no response +// metadata. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response // body. Because of this, if the HEAD request generates an error, it returns a // generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 // Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not -// possible to retrieve the exact exception of these error codes. Request headers -// are limited to 8 KB in size. For more information, see Common Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) -// . 
Directory buckets - For directory buckets, you must make requests for this API +// possible to retrieve the exact exception of these error codes. +// +// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers]. +// +// Directory buckets - For directory buckets, you must make requests for this API // operation to the Zonal endpoint. These endpoints support virtual-hosted-style // requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions +// // - General purpose bucket permissions - To use HEAD , you must have the // s3:GetObject permission. You need the relevant read object (or version) -// permission for this operation. For more information, see Actions, resources, -// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) -// in the Amazon S3 User Guide. If the object you request doesn't exist, the error -// that Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3]in the Amazon S3 +// User Guide. +// +// If the object you request doesn't exist, the error that Amazon S3 returns +// +// depends on whether you also have the s3:ListBucket permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. 
Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Encryption Encryption request headers, like x-amz-server-side-encryption , // should not be sent for HEAD requests if your object uses server-side encryption @@ -66,20 +75,26 @@ import ( // want to specify the encryption method. If you include this header in a HEAD // request for an object that uses these types of keys, you’ll get an HTTP 400 Bad // Request error. It's because the encryption method can't be changed when you -// retrieve the object. If you encrypt an object by using server-side encryption -// with customer-provided encryption keys (SSE-C) when you store the object in -// Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers to provide the encryption key for the server to be able to -// retrieve the object's metadata. The headers are: +// retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when you +// retrieve the metadata from the object, you must use the following headers to +// provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// // - x-amz-server-side-encryption-customer-algorithm +// // - x-amz-server-side-encryption-customer-key +// // - x-amz-server-side-encryption-customer-key-MD5 // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. Directory bucket permissions - For directory -// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( -// AES256 ) is supported. Versioning +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. +// +// Directory bucket permissions - For directory buckets, only server-side +// encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// +// Versioning // // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the @@ -95,11 +110,23 @@ import ( // supported by directory buckets. You can only specify null to the versionId // query parameter in the request. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following actions are -// related to HeadObject : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following actions are related to HeadObject : +// +// [GetObject] +// +// [GetObjectAttributes] +// +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { if params == nil { params = &HeadObjectInput{} @@ -117,31 +144,39 @@ func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns type HeadObjectInput struct { - // The name of the bucket that contains the object. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // The name of the bucket that contains the object. 
+ // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -151,10 +186,11 @@ type HeadObjectInput struct { // This member is required. Key *string - // To retrieve the checksum, this parameter must be enabled. In addition, if you - // enable ChecksumMode and the object is encrypted with Amazon Web Services Key - // Management Service (Amazon Web Services KMS), you must have permission to use - // the kms:Decrypt action for the request to succeed. + // To retrieve the checksum, this parameter must be enabled. + // + // In addition, if you enable ChecksumMode and the object is encrypted with Amazon + // Web Services Key Management Service (Amazon Web Services KMS), you must have + // permission to use the kms:Decrypt action for the request to succeed. ChecksumMode types.ChecksumMode // The account ID of the expected bucket owner. If the account ID that you provide @@ -163,40 +199,71 @@ type HeadObjectInput struct { ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. 
If both of the - // If-Match and If-Unmodified-Since headers are present in the request as follows: + // specified; otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // // - If-Match condition evaluates to true , and; + // // - If-Unmodified-Since condition evaluates to false ; - // Then Amazon S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. If both of the If-None-Match and - // If-Modified-Since headers are present in the request as follows: + // otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: + // // - If-None-Match condition evaluates to false , and; + // // - If-Modified-Since condition evaluates to true ; - // Then Amazon S3 returns the 304 Not Modified response code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. If both of the - // If-None-Match and If-Modified-Since headers are present in the request as - // follows: + // specified; otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: + // // - If-None-Match condition evaluates to false , and; + // // - If-Modified-Since condition evaluates to true ; - // Then Amazon S3 returns the 304 Not Modified response code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. If both of the If-Match - // and If-Unmodified-Since headers are present in the request as follows: + // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // // - If-Match condition evaluates to true , and; + // // - If-Unmodified-Since condition evaluates to false ; - // Then Amazon S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see [RFC 7232]. 
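To make the conditional-header semantics described above concrete, here is a minimal sketch of a conditional HeadObject call with this client. The bucket and key names are placeholders, error handling is deliberately simplified, and a 304 (not modified) answer typically surfaces from the SDK as an error rather than as an output value.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Ask for metadata only if the object changed within the last 24 hours;
	// otherwise the service answers 304 and the call returns an error.
	out, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
		Bucket:          aws.String("example-bucket"), // placeholder bucket name
		Key:             aws.String("example-key"),    // placeholder object key
		IfModifiedSince: aws.Time(time.Now().Add(-24 * time.Hour)),
	})
	if err != nil {
		log.Fatalf("conditional HeadObject failed (possibly 304 Not Modified): %v", err)
	}
	log.Printf("ETag=%s LastModified=%v", aws.ToString(out.ETag), aws.ToTime(out.LastModified))
}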
+ // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -214,39 +281,64 @@ type HeadObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Sets the Cache-Control header of the response. + ResponseCacheControl *string + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string + + // Sets the Content-Type header of the response. + ResponseContentType *string + + // Sets the Expires header of the response. + ResponseExpires *time.Time + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // Version ID used to reference a specific version of the object. For directory - // buckets in this API operation, only the null value of the version ID is - // supported. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. 
VersionId *string noSmithyDocumentSerde } func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -257,13 +349,15 @@ type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string - // The archive state of the head object. This functionality is not supported for - // directory buckets. + // The archive state of the head object. + // + // This functionality is not supported for directory buckets. ArchiveStatus types.ArchiveStatus // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not - // supported for directory buckets. + // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. @@ -274,8 +368,10 @@ type HeadObjectOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -283,8 +379,10 @@ type HeadObjectOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -292,8 +390,10 @@ type HeadObjectOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be @@ -301,8 +401,10 @@ type HeadObjectOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // Specifies presentational information for the object. @@ -323,24 +425,36 @@ type HeadObjectOutput struct { ContentType *string // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. This - // functionality is not supported for directory buckets. + // Marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ), the response includes this header. It includes the expiry-date and rule-id + // If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ), + // the response includes this header. It includes the expiry-date and rule-id // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. This functionality is not supported for directory - // buckets. + // rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // The date and time at which the object is no longer cacheable. + // + // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + // the ExpiresString field which contains the unparsed value from the service + // response. Expires *time.Time + // The unparsed value of the Expires field from the service response. Prefer use + // of this value over the normal Expires response field where possible. + ExpiresString *string + // Date and time when the object was last modified. LastModified *time.Time @@ -352,26 +466,34 @@ type HeadObjectOutput struct { // This is set to the number of metadata entries not returned in x-amz-meta // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. This functionality - // is not supported for directory buckets. 
+ // can create metadata whose values are not legal HTTP headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int32 // Specifies whether a legal hold is in effect for this object. This header is // only returned if the requester has the s3:GetObjectLegalHold permission. This // header is not returned if the specified version of this object has never had a - // legal hold applied. For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . This functionality is not supported for directory buckets. + // legal hold applied. For more information about S3 Object Lock, see [Object Lock]. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // The Object Lock mode, if any, that's in effect for this object. This header is // only returned if the requester has the s3:GetObjectRetention permission. For - // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . This functionality is not supported for directory buckets. + // more information about S3 Object Lock, see [Object Lock]. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockMode types.ObjectLockMode // The date and time when the Object Lock retention period expires. This header is - // only returned if the requester has the s3:GetObjectRetention permission. This - // functionality is not supported for directory buckets. + // only returned if the requester has the s3:GetObjectRetention permission. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify @@ -379,89 +501,121 @@ type HeadObjectOutput struct { PartsCount *int32 // Amazon S3 can return this header if your request involves a bucket that is - // either a source or a destination in a replication rule. In replication, you have - // a source bucket on which you configure replication and destination bucket or - // buckets where Amazon S3 stores object replicas. When you request an object ( - // GetObject ) or object metadata ( HeadObject ) from these buckets, Amazon S3 will - // return the x-amz-replication-status header in the response as follows: + // either a source or a destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication and + // destination bucket or buckets where Amazon S3 stores object replicas. When you + // request an object ( GetObject ) or object metadata ( HeadObject ) from these + // buckets, Amazon S3 will return the x-amz-replication-status header in the + // response as follows: + // // - If requesting an object from the source bucket, Amazon S3 will return the // x-amz-replication-status header if the object in your request is eligible for - // replication. For example, suppose that in your replication configuration, you - // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with - // key prefix TaxDocs . Any objects you upload with this key name prefix, for - // example TaxDocs/document1.pdf , are eligible for replication. 
For any object - // request with this key name prefix, Amazon S3 will return the - // x-amz-replication-status header with value PENDING, COMPLETED or FAILED - // indicating object replication status. + // replication. + // + // For example, suppose that in your replication configuration, you specify object + // prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix + // TaxDocs . Any objects you upload with this key name prefix, for example + // TaxDocs/document1.pdf , are eligible for replication. For any object request + // with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // // - If requesting an object from a destination bucket, Amazon S3 will return // the x-amz-replication-status header with value REPLICA if the object in your // request is a replica that Amazon S3 created and there is no replica modification // replication in progress. + // // - When replicating objects to multiple destination buckets, the // x-amz-replication-status header acts differently. The header of the source // object will only return a value of COMPLETED when replication is successful to // all destinations. The header will remain at value PENDING until replication has // completed for all destinations. If one or more destinations fails replication // the header will return FAILED. - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // . This functionality is not supported for directory buckets. + // + // For more information, see [Replication]. + // + // This functionality is not supported for directory buckets. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If the object is an archived object (an object whose storage class is GLACIER), // the response includes this header if either the archive restoration is in - // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // or an archive copy is already restored. If an archive copy is already restored, - // the header value indicates when Amazon S3 is scheduled to delete the object - // copy. For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 - // Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header - // returns the value ongoing-request="true" . For more information about archiving - // objects, see Transitioning Objects: General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations) - // . This functionality is not supported for directory buckets. Only the S3 Express + // progress (see [RestoreObject]or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value + // ongoing-request="true" . 
+ // + // For more information about archiving objects, see [Transitioning Objects: General Considerations]. + // + // This functionality is not supported for directory buckets. Only the S3 Express // One Zone storage class is supported by directory buckets to store objects. + // + // [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations + // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html Restore *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by // directory buckets to store objects. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // Version ID of the object. This functionality is not supported for directory - // buckets. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. 
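Several of the response fields touched in this hunk, notably the deprecated Expires and its new ExpiresString companion, plus the Restore and ReplicationStatus headers, are easiest to read in use. A rough sketch follows, assuming an already-configured *s3.Client and placeholder bucket and key arguments.

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// inspectObjectMetadata logs a few HeadObject response headers, preferring the
// unparsed ExpiresString field over the deprecated Expires field.
func inspectObjectMetadata(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	if out.ExpiresString != nil {
		log.Printf("Expires header: %s", aws.ToString(out.ExpiresString))
	}
	if out.Restore != nil {
		log.Printf("x-amz-restore: %s", aws.ToString(out.Restore))
	}
	if out.ReplicationStatus != "" {
		log.Printf("x-amz-replication-status: %s", out.ReplicationStatus)
	}
	return nil
}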
+ // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -492,25 +646,25 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -528,6 +682,15 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpHeadObjectValidationMiddleware(stack); err != nil { return err } @@ -537,7 +700,7 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addHeadObjectUpdateEndpoint(stack, options); err != nil { @@ -564,28 +727,23 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti return nil } -func (v *HeadObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// HeadObjectAPIClient is a client that implements the HeadObject operation. -type HeadObjectAPIClient interface { - HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) -} - -var _ HeadObjectAPIClient = (*Client)(nil) - // ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter type ObjectExistsWaiterOptions struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. APIOptions []func(*middleware.Stack) error + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + // MinDelay is the minimum amount of time to delay between retries. 
If unset, // ObjectExistsWaiter will use default minimum delay of 5 seconds. Note that // MinDelay must resolve to a value lesser than or equal to the MaxDelay. @@ -601,12 +759,13 @@ type ObjectExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) } @@ -682,7 +841,16 @@ func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObje } out, err := w.client.HeadObject(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } }) retryable, err := options.Retryable(ctx, params, out, err) @@ -737,8 +905,17 @@ type ObjectNotExistsWaiterOptions struct { // Set of options to modify how an operation is invoked. These apply to all // operations invoked for this client. Use functional options on operation call to // modify this list for per operation behavior. + // + // Passing options here is functionally equivalent to passing values to this + // config's ClientOptions field that extend the inner client's APIOptions directly. APIOptions []func(*middleware.Stack) error + // Functional options to be passed to all operations invoked by this client. + // + // Function values that modify the inner APIOptions are applied after the waiter + // config's own APIOptions modifiers. + ClientOptions []func(*Options) + // MinDelay is the minimum amount of time to delay between retries. If unset, // ObjectNotExistsWaiter will use default minimum delay of 5 seconds. Note that // MinDelay must resolve to a value lesser than or equal to the MaxDelay. @@ -754,12 +931,13 @@ type ObjectNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. 
In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) } @@ -836,7 +1014,16 @@ func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadO } out, err := w.client.HeadObject(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } + for _, opt := range options.ClientOptions { + opt(o) + } }) retryable, err := options.Retryable(ctx, params, out, err) @@ -881,6 +1068,20 @@ func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, return true, nil } +func (v *HeadObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadObjectAPIClient is a client that implements the HeadObject operation. +type HeadObjectAPIClient interface { + HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) +} + +var _ HeadObjectAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go index 6ecf8458b859..f2ba34439550 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -14,27 +14,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the analytics -// configurations for the bucket. You can have up to 1,000 analytics configurations -// per bucket. This action supports list pagination and does not return more than -// 100 configurations at a time. You should always check the IsTruncated element -// in the response. If there are no more configurations to list, IsTruncated is -// set to false. If there are more configurations to list, IsTruncated is set to -// true, and there will be a value in NextContinuationToken . You use the +// This operation is not supported by directory buckets. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element in +// the response. If there are no more configurations to list, IsTruncated is set +// to false. If there are more configurations to list, IsTruncated is set to true, +// and there will be a value in NextContinuationToken . 
You use the // NextContinuationToken value to continue the pagination of the list by passing -// the value in continuation-token in the request to GET the next page. To use -// this operation, you must have permissions to perform the +// the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the // s3:GetAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about Amazon S3 analytics feature, see Amazon S3 Analytics – -// Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . The following operations are related to ListBucketAnalyticsConfigurations : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. 
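The pagination behavior described above can be driven by hand by feeding NextContinuationToken back into ContinuationToken on the next request. A minimal sketch, assuming an already-configured *s3.Client and a placeholder bucket name:

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAllAnalyticsConfigurations walks every page of results, stopping once the
// service no longer returns a continuation token.
func listAllAnalyticsConfigurations(ctx context.Context, client *s3.Client, bucket string) error {
	var token *string
	for {
		out, err := client.ListBucketAnalyticsConfigurations(ctx, &s3.ListBucketAnalyticsConfigurationsInput{
			Bucket:            aws.String(bucket),
			ContinuationToken: token,
		})
		if err != nil {
			return err
		}
		for _, c := range out.AnalyticsConfigurationList {
			log.Printf("analytics configuration: %s", aws.ToString(c.Id))
		}
		if out.NextContinuationToken == nil {
			break // no further pages
		}
		token = out.NextContinuationToken
	}
	return nil
}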
+// +// The following operations are related to ListBucketAnalyticsConfigurations : +// +// [GetBucketAnalyticsConfiguration] +// +// [DeleteBucketAnalyticsConfiguration] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { if params == nil { params = &ListBucketAnalyticsConfigurationsInput{} @@ -70,6 +83,7 @@ type ListBucketAnalyticsConfigurationsInput struct { } func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -121,25 +135,25 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -157,6 +171,15 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -166,7 +189,7 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go index de4a2079ae0f..01b66028e1b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -14,25 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported by directory buckets. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to ListBucketIntelligentTieringConfigurations include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
+// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) { if params == nil { params = &ListBucketIntelligentTieringConfigurationsInput{} @@ -64,6 +77,7 @@ type ListBucketIntelligentTieringConfigurationsInput struct { } func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -115,25 +129,25 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -151,6 +165,15 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -160,7 +183,7 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil { diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go index 881f7d92530e..889ad0264771 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -14,26 +14,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns a list of -// inventory configurations for the bucket. You can have up to 1,000 analytics -// configurations per bucket. This action supports list pagination and does not -// return more than 100 configurations at a time. Always check the IsTruncated -// element in the response. If there are no more configurations to list, -// IsTruncated is set to false. If there are more configurations to list, -// IsTruncated is set to true, and there is a value in NextContinuationToken . You -// use the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. To -// use this operation, you must have permissions to perform the +// This operation is not supported by directory buckets. +// +// Returns a list of inventory configurations for the bucket. You can have up to +// 1,000 analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. If +// there are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken . You use the NextContinuationToken value to +// continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. 
+// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory] +// // The following operations are related to ListBucketInventoryConfigurations : -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// [GetBucketInventoryConfiguration] +// +// [DeleteBucketInventoryConfiguration] +// +// [PutBucketInventoryConfiguration] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { if params == nil { params = &ListBucketInventoryConfigurationsInput{} @@ -71,6 +85,7 @@ type ListBucketInventoryConfigurationsInput struct { } func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -122,25 +137,25 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -158,6 +173,15 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + 
return err + } if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -167,7 +191,7 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go index fc2cf72877f1..8a3a080afa6a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -13,28 +13,42 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the metrics -// configurations for the bucket. The metrics configurations are only for the -// request metrics of the bucket and do not provide information on daily storage -// metrics. You can have up to 1,000 configurations per bucket. This action -// supports list pagination and does not return more than 100 configurations at a -// time. Always check the IsTruncated element in the response. If there are no -// more configurations to list, IsTruncated is set to false. If there are more -// configurations to list, IsTruncated is set to true, and there is a value in -// NextContinuationToken . You use the NextContinuationToken value to continue the -// pagination of the list by passing the value in continuation-token in the -// request to GET the next page. To use this operation, you must have permissions -// to perform the s3:GetMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For more information about metrics configurations and CloudWatch request -// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to ListBucketMetricsConfigurations : -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// This operation is not supported by directory buckets. +// +// Lists the metrics configurations for the bucket. The metrics configurations are +// only for the request metrics of the bucket and do not provide information on +// daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. 
Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. If +// there are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken . You use the NextContinuationToken value to +// continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For more information about metrics configurations and CloudWatch request +// metrics, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to ListBucketMetricsConfigurations : +// +// [PutBucketMetricsConfiguration] +// +// [GetBucketMetricsConfiguration] +// +// [DeleteBucketMetricsConfiguration] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { if params == nil { params = &ListBucketMetricsConfigurationsInput{} @@ -72,6 +86,7 @@ type ListBucketMetricsConfigurationsInput struct { } func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -124,25 +139,25 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = 
addClientUserAgent(stack, options); err != nil { @@ -160,6 +175,15 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -169,7 +193,7 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go index a6189288723f..8d2f98a8c001 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go @@ -13,11 +13,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns a list of all -// buckets owned by the authenticated sender of the request. To use this operation, -// you must have the s3:ListAllMyBuckets permission. For information about Amazon -// S3 buckets, see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) -// . +// This operation is not supported by directory buckets. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// To use this operation, you must have the s3:ListAllMyBuckets permission. +// +// For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets]. 
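The continuation-token flow described above for ListBucketMetricsConfigurations (check IsTruncated, then feed NextContinuationToken back as continuation-token) can be exercised with the v2 SDK roughly as sketched below. This is illustrative only and not part of the vendored change: it assumes an already-constructed *s3.Client, and the helper name listAllMetricsConfigurations is made up for the example.

    package s3examples

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // listAllMetricsConfigurations is a hypothetical helper: it follows the
    // continuation-token pagination described in the doc comment, printing the
    // ID of every metrics configuration on the bucket.
    func listAllMetricsConfigurations(ctx context.Context, client *s3.Client, bucket string) error {
        var token *string
        for {
            out, err := client.ListBucketMetricsConfigurations(ctx, &s3.ListBucketMetricsConfigurationsInput{
                Bucket:            aws.String(bucket),
                ContinuationToken: token,
            })
            if err != nil {
                return err
            }
            for _, cfg := range out.MetricsConfigurationList {
                fmt.Println(aws.ToString(cfg.Id))
            }
            if !aws.ToBool(out.IsTruncated) {
                return nil // IsTruncated is false: nothing more to list
            }
            token = out.NextContinuationToken // passed back as continuation-token
        }
    }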
+// +// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { if params == nil { params = &ListBucketsInput{} @@ -73,25 +76,25 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -109,13 +112,22 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { return err } if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListBucketsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go index 373531ab273c..5fdf9e6c7a24 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go @@ -15,23 +15,27 @@ import ( ) // Returns a list of all Amazon S3 directory buckets owned by the authenticated -// sender of the request. For more information about directory buckets, see -// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Regional endpoint. These endpoints -// support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. 
Permissions You must have the -// s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy -// instead of a bucket policy. Cross-account access to this API operation isn't -// supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . +// sender of the request. For more information about directory buckets, see [Directory buckets]in the +// Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in +// the Amazon S3 User Guide. +// +// Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in +// an IAM identity-based policy instead of a bucket policy. Cross-account access to +// this API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . 
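The ListDirectoryBuckets operation documented above also ships a generated paginator (defined further down in this file), which can be driven as in the following sketch. It is illustrative only and assumes an existing *s3.Client whose credentials carry the s3express:ListAllMyDirectoryBuckets permission; printDirectoryBuckets is a hypothetical helper.

    package s3examples

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // printDirectoryBuckets is a hypothetical helper that uses the generated
    // paginator to walk every page of directory buckets owned by the caller.
    func printDirectoryBuckets(ctx context.Context, client *s3.Client) error {
        p := s3.NewListDirectoryBucketsPaginator(client, &s3.ListDirectoryBucketsInput{})
        for p.HasMorePages() {
            page, err := p.NextPage(ctx)
            if err != nil {
                return err
            }
            for _, b := range page.Buckets {
                fmt.Println(aws.ToString(b.Name))
            }
        }
        return nil
    }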
+// +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { if params == nil { params = &ListDirectoryBucketsInput{} @@ -104,25 +108,25 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -140,13 +144,22 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil { return err } if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListDirectoryBucketsUpdateEndpoint(stack, options); err != nil { @@ -173,14 +186,6 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S return nil } -// ListDirectoryBucketsAPIClient is a client that implements the -// ListDirectoryBuckets operation. -type ListDirectoryBucketsAPIClient interface { - ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) -} - -var _ ListDirectoryBucketsAPIClient = (*Client)(nil) - // ListDirectoryBucketsPaginatorOptions is the paginator options for // ListDirectoryBuckets type ListDirectoryBucketsPaginatorOptions struct { @@ -247,6 +252,9 @@ func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ... } params.MaxDirectoryBuckets = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListDirectoryBuckets(ctx, &params, optFns...)
if err != nil { return nil, err @@ -266,6 +274,14 @@ func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ... return result, nil } +// ListDirectoryBucketsAPIClient is a client that implements the +// ListDirectoryBuckets operation. +type ListDirectoryBucketsAPIClient interface { + ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) +} + +var _ ListDirectoryBucketsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go index 3e6853e689bd..60f9e1306875 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -16,38 +16,45 @@ import ( // This operation lists in-progress multipart uploads in a bucket. An in-progress // multipart upload is a multipart upload that has been initiated by the // CreateMultipartUpload request, but has not yet been completed or aborted. +// // Directory buckets - If multipart uploads in a directory bucket are in progress, // you can't delete the bucket until all the in-progress multipart uploads are -// aborted or completed. The ListMultipartUploads operation returns a maximum of -// 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is -// also the default value. You can further limit the number of uploads in a -// response by specifying the max-uploads request parameter. If there are more -// than 1,000 multipart uploads that satisfy your ListMultipartUploads request, -// the response returns an IsTruncated element with the value of true , a -// NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining -// multipart uploads, you need to make subsequent ListMultipartUploads requests. -// In these requests, include two query parameters: key-marker and upload-id-marker -// . Set the value of key-marker to the NextKeyMarker value from the previous -// response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker -// value from the previous response. Directory buckets - The upload-id-marker -// element and the NextUploadIdMarker element aren't supported by directory -// buckets. To list the additional multipart uploads, you only need to set the -// value of key-marker to the NextKeyMarker value from the previous response. For -// more information about multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format +// aborted or completed. +// +// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads +// in the response. The limit of 1,000 multipart uploads is also the default value. +// You can further limit the number of uploads in a response by specifying the +// max-uploads request parameter. 
If there are more than 1,000 multipart uploads +// that satisfy your ListMultipartUploads request, the response returns an +// IsTruncated element with the value of true , a NextKeyMarker element, and a +// NextUploadIdMarker element. To list the remaining multipart uploads, you need to +// make subsequent ListMultipartUploads requests. In these requests, include two +// query parameters: key-marker and upload-id-marker . Set the value of key-marker +// to the NextKeyMarker value from the previous response. Similarly, set the value +// of upload-id-marker to the NextUploadIdMarker value from the previous response. +// +// Directory buckets - The upload-id-marker element and the NextUploadIdMarker +// element aren't supported by directory buckets. To list the additional multipart +// uploads, you only need to set the value of key-marker to the NextKeyMarker +// value from the previous response. +// +// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions +// // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -55,29 +62,48 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . 
// // Sorting of multipart uploads in response +// // - General purpose bucket - In the ListMultipartUploads response, the multipart // uploads are sorted based on two criteria: +// // - Key-based sorting - Multipart uploads are initially sorted in ascending // order based on their object keys. +// // - Time-based sorting - For uploads that share the same object key, they are // further sorted in ascending order based on the upload initiation time. Among // uploads with the same key, the one that was initiated first will appear before // the ones that were initiated later. +// // - Directory bucket - In the ListMultipartUploads response, the multipart // uploads aren't sorted lexicographically based on the object keys. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to ListMultipartUploads : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following operations are related to ListMultipartUploads : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [AbortMultipartUpload] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { if params == nil { params = &ListMultipartUploadsInput{} @@ -95,42 +121,52 @@ func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipart type ListMultipartUploadsInput struct { - // The name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format + // The name of the bucket to which the multipart upload was initiated. 
+ // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. 
+ // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string - // Character you use to group keys. All keys that contain the same string between - // the prefix, if specified, and the first occurrence of the delimiter after the - // prefix are grouped under a single result element, CommonPrefixes . If you don't - // specify the prefix parameter, then the substring starts at the beginning of the - // key. The keys that are grouped under CommonPrefixes result element are not - // returned elsewhere in the response. Directory buckets - For directory buckets, / - // is the only supported delimiter. + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and the + // first occurrence of the delimiter after the prefix are grouped under a single + // result element, CommonPrefixes . If you don't specify the prefix parameter, then + // the substring starts at the beginning of the key. The keys that are grouped + // under CommonPrefixes result element are not returned elsewhere in the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string // Requests Amazon S3 to encode the object keys in the response and specifies the @@ -147,20 +183,26 @@ type ListMultipartUploadsInput struct { ExpectedBucketOwner *string // Specifies the multipart upload after which listing should begin. + // // - General purpose buckets - For general purpose buckets, key-marker is an // object key. Together with upload-id-marker , this parameter specifies the - // multipart upload after which listing should begin. If upload-id-marker is not - // specified, only the keys lexicographically greater than the specified - // key-marker will be included in the list. If upload-id-marker is specified, any - // multipart uploads for a key equal to the key-marker might also be included, - // provided those multipart uploads have upload IDs lexicographically greater than - // the specified upload-id-marker . + // multipart upload after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to the + // key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker . + // // - Directory buckets - For directory buckets, key-marker is obfuscated and // isn't a real object key. The upload-id-marker parameter isn't supported by // directory buckets. To list the additional multipart uploads, you only need to // set the value of key-marker to the NextKeyMarker value from the previous - // response. In the ListMultipartUploads response, the multipart uploads aren't - // sorted lexicographically based on the object keys. + // response. + // + // In the ListMultipartUploads response, the multipart uploads aren't sorted + // lexicographically based on the object keys. 
KeyMarker *string // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the @@ -171,32 +213,38 @@ type ListMultipartUploadsInput struct { // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping of // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) Directory buckets - For directory buckets, only - // prefixes that end in a delimiter ( / ) are supported. + // use a folder in a file system.) + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter is // ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . This functionality is not - // supported for directory buckets. + // greater than the specified upload-id-marker . + // + // This functionality is not supported for directory buckets. UploadIdMarker *string noSmithyDocumentSerde } func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -210,20 +258,25 @@ type ListMultipartUploadsOutput struct { // If you specify a delimiter in the request, then the result returns each // distinct key prefix containing the delimiter in a CommonPrefixes element. The - // distinct key prefixes are returned in the Prefix child element. Directory - // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are - // supported. + // distinct key prefixes are returned in the Prefix child element. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. CommonPrefixes []types.CommonPrefix // Contains the delimiter you specified in the request. If you don't specify a - // delimiter in your request, this element is absent from the response. Directory - // buckets - For directory buckets, / is the only supported delimiter. + // delimiter in your request, this element is absent from the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. 
If you - // specify the encoding-type request parameter, Amazon S3 includes this element in - // the response, and returns encoded key name values in the following response - // elements: Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . EncodingType types.EncodingType // Indicates whether the returned list of multipart uploads is truncated. A value @@ -244,22 +297,31 @@ type ListMultipartUploadsOutput struct { NextKeyMarker *string // When a list is truncated, this element specifies the value that should be used - // for the upload-id-marker request parameter in a subsequent request. This - // functionality is not supported for directory buckets. + // for the upload-id-marker request parameter in a subsequent request. + // + // This functionality is not supported for directory buckets. NextUploadIdMarker *string // When a prefix is provided in the request, this field contains the specified // prefix. The result contains only keys starting with the specified prefix. - // Directory buckets - For directory buckets, only prefixes that end in a delimiter - // ( / ) are supported. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // Upload ID after which listing began. This functionality is not supported for - // directory buckets. + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter is + // ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker . + // + // This functionality is not supported for directory buckets. UploadIdMarker *string // Container for elements related to a particular multipart upload. 
A response can @@ -294,25 +356,25 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -330,6 +392,15 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { return err } @@ -339,7 +410,7 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go index 2a0cd10fa776..2f7cc8d2c939 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go @@ -13,19 +13,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns metadata about -// all versions of the objects in a bucket. You can also use request parameters as -// selection criteria to return metadata about a subset of all the object versions. +// This operation is not supported by directory buckets. +// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. +// // To use this operation, you must have permission to perform the -// s3:ListBucketVersions action. Be aware of the name difference. A 200 OK -// response can contain valid or invalid XML. Make sure to design your application -// to parse the contents of the response and handle it appropriately. To use this -// operation, you must have READ access to the bucket. 
The following operations are -// related to ListObjectVersions : -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// s3:ListBucketVersions action. Be aware of the name difference. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions : +// +// [ListObjectsV2] +// +// [GetObject] +// +// [PutObject] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { if params == nil { params = &ListObjectVersionsInput{} @@ -93,10 +108,12 @@ type ListObjectVersionsInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Specifies the object version you want to start listing from. @@ -106,6 +123,7 @@ type ListObjectVersionsInput struct { } func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -127,10 +145,13 @@ type ListObjectVersionsOutput struct { // max-keys limitation. These keys are not returned elsewhere in the response. Delimiter *string - // Encoding type used by Amazon S3 to encode object key names in the XML response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . + // response elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . 
EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that @@ -164,7 +185,9 @@ type ListObjectVersionsOutput struct { Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Marks the last version of the key returned in a truncated response. @@ -201,25 +224,25 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -237,6 +260,15 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { return err } @@ -246,7 +278,7 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go index f0732edc7d47..3ad2ff825056 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go @@ -13,19 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns some or all (up -// to 1,000) of the objects in a bucket. You can use the request parameters as -// selection criteria to return a subset of the objects in a bucket. A 200 OK -// response can contain valid or invalid XML. Be sure to design your application to -// parse the contents of the response and handle it appropriately. This action has -// been revised. 
We recommend that you use the newer version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// , when developing applications. For backward compatibility, Amazon S3 continues -// to support ListObjects . The following operations are related to ListObjects : -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// This operation is not supported by directory buckets. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use the +// request parameters as selection criteria to return a subset of the objects in a +// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design +// your application to parse the contents of the response and handle it +// appropriately. +// +// This action has been revised. We recommend that you use the newer version, [ListObjectsV2], +// when developing applications. For backward compatibility, Amazon S3 continues to +// support ListObjects . +// +// The following operations are related to ListObjects : +// +// [ListObjectsV2] +// +// [GetObject] +// +// [PutObject] +// +// [CreateBucket] +// +// [ListBuckets] +// +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { if params == nil { params = &ListObjectsInput{} @@ -43,31 +59,39 @@ func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optF type ListObjectsInput struct { - // The name of the bucket containing the objects. Directory buckets - When you use - // this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. 
For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // The name of the bucket containing the objects. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -113,6 +137,7 @@ type ListObjectsInput struct { } func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -121,14 +146,20 @@ func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { type ListObjectsOutput struct { // All of the keys (up to 1,000) rolled up in a common prefix count as a single - // return when calculating the number of returns. A response can contain - // CommonPrefixes only if you specify a delimiter. 
CommonPrefixes contains all (if - // there are any) keys between Prefix and the next occurrence of the string - // specified by the delimiter. CommonPrefixes lists keys that act like - // subdirectories in the directory specified by Prefix . For example, if the prefix - // is notes/ and the delimiter is a slash ( / ), as in notes/summer/july , the - // common prefix is notes/summer/ . All of the keys that roll up into a common - // prefix count as a single return when calculating the number of returns. + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the next + // occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory + // specified by Prefix . + // + // For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in + // notes/summer/july , the common prefix is notes/summer/ . All of the keys that + // roll up into a common prefix count as a single return when calculating the + // number of returns. CommonPrefixes []types.CommonPrefix // Metadata about each object returned. @@ -141,7 +172,9 @@ type ListObjectsOutput struct { // MaxKeys value. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object keys in the response. If using + // url , non-ASCII characters used in an object's key name will be URL encoded. For + // example, the object test_file(3).png will appear as test_file%283%29.png . EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that @@ -161,18 +194,21 @@ type ListObjectsOutput struct { // When the response is truncated (the IsTruncated element value in the response // is true ), you can use the key name in this field as the marker parameter in // the subsequent request to get the next set of objects. Amazon S3 lists objects - // in alphabetical order. This element is returned only if you have the delimiter - // request parameter specified. If the response does not include the NextMarker - // element and it is truncated, you can use the value of the last Key element in - // the response as the marker parameter in the subsequent request to get the next - // set of object keys. + // in alphabetical order. + // + // This element is returned only if you have the delimiter request parameter + // specified. If the response does not include the NextMarker element and it is + // truncated, you can use the value of the last Key element in the response as the + // marker parameter in the subsequent request to get the next set of object keys. NextMarker *string // Keys that begin with the indicated prefix. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
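The Delimiter/CommonPrefixes behaviour documented above (keys that share notes/summer/ collapse into a single CommonPrefixes entry) might look like the sketch below in practice. It is illustrative only: listTopLevel and the notes/ prefix are placeholders, and the *s3.Client is assumed to exist.

    package s3examples

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // listTopLevel is a hypothetical helper: it groups keys under "notes/" by
    // the "/" delimiter, so shared prefixes such as notes/summer/ come back in
    // CommonPrefixes instead of as individual keys.
    func listTopLevel(ctx context.Context, client *s3.Client, bucket string) error {
        out, err := client.ListObjects(ctx, &s3.ListObjectsInput{
            Bucket:    aws.String(bucket),
            Prefix:    aws.String("notes/"),
            Delimiter: aws.String("/"),
        })
        if err != nil {
            return err
        }
        for _, cp := range out.CommonPrefixes {
            fmt.Println("prefix:", aws.ToString(cp.Prefix))
        }
        for _, obj := range out.Contents {
            fmt.Println("key:", aws.ToString(obj.Key))
        }
        // When IsTruncated is true, pass NextMarker (or the last Key returned)
        // as Marker in the next request to continue the listing.
        return nil
    }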
@@ -203,25 +239,25 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -239,6 +275,15 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpListObjectsValidationMiddleware(stack); err != nil { return err } @@ -248,7 +293,7 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListObjectsUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go index b2f182dfb6d3..904497361957 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go @@ -17,26 +17,29 @@ import ( // You can use the request parameters as selection criteria to return a subset of // the objects in a bucket. A 200 OK response can contain valid or invalid XML. // Make sure to design your application to parse the contents of the response and -// handle it appropriately. For more information about listing objects, see -// Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) -// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// . Directory buckets - For directory buckets, you must make requests for this API +// handle it appropriately. +// +// For more information about listing objects, see [Listing object keys programmatically] in the Amazon S3 User Guide. +// To get a list of your buckets, see [ListBuckets]. +// +// Directory buckets - For directory buckets, you must make requests for this API // operation to the Zonal endpoint. These endpoints support virtual-hosted-style // requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . 
Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions +// // - General purpose bucket permissions - To use this operation, you must have // READ access to the bucket. You must have permission to perform the // s3:ListBucket action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. +// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -44,24 +47,42 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Sorting order of returned objects +// // - General purpose bucket - For general purpose buckets, ListObjectsV2 returns // objects in lexicographical order based on their key names. +// // - Directory bucket - For directory buckets, ListObjectsV2 does not return // objects in lexicographical order. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . This section describes the -// latest revision of this action. We recommend that you use this revised API -// operation for application development. For backward compatibility, Amazon S3 -// continues to support the prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) -// . 
The following operations are related to ListObjectsV2 : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// This section describes the latest revision of this action. We recommend that +// you use this revised API operation for application development. For backward +// compatibility, Amazon S3 continues to support the prior version of this API +// operation, [ListObjects]. +// +// The following operations are related to ListObjectsV2 : +// +// [GetObject] +// +// [PutObject] +// +// [CreateBucket] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { if params == nil { params = &ListObjectsV2Input{} @@ -79,30 +100,37 @@ func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, type ListObjectsV2Input struct { - // Directory buckets - When you use this operation with a directory bucket, you + // Directory buckets - When you use this operation with a directory bucket, you // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. 
For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -113,16 +141,20 @@ type ListObjectsV2Input struct { ContinuationToken *string // A delimiter is a character that you use to group keys. - // - Directory buckets - For directory buckets, / is the only supported - // delimiter. + // + // - Directory buckets - For directory buckets, / is the only supported delimiter. + // // - Directory buckets - When you query ListObjectsV2 with a delimiter during // in-progress multipart uploads, the CommonPrefixes response parameter contains // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) - // in the Amazon S3 User Guide. + // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + // + // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. 
+ // Encoding type used by Amazon S3 to encode object keys in the response. If using + // url , non-ASCII characters used in an object's key name will be URL encoded. For + // example, the object test_file(3).png will appear as test_file%283%29.png . EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -132,8 +164,10 @@ type ListObjectsV2Input struct { // The owner field is not present in ListObjectsV2 by default. If you want to // return the owner field with each key in the result, then set the FetchOwner - // field to true . Directory buckets - For directory buckets, the bucket owner is - // returned as the object owner for all objects. + // field to true . + // + // Directory buckets - For directory buckets, the bucket owner is returned as the + // object owner for all objects. FetchOwner *bool // Sets the maximum number of keys returned in the response. By default, the @@ -142,29 +176,35 @@ type ListObjectsV2Input struct { MaxKeys *int32 // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. This functionality is not supported - // for directory buckets. + // that you do not specify are not returned. + // + // This functionality is not supported for directory buckets. OptionalObjectAttributes []types.OptionalObjectAttributes - // Limits the response to keys that begin with the specified prefix. Directory - // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are - // supported. + // Limits the response to keys that begin with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // Confirms that the requester knows that she or he will be charged for the list // objects request in V2 style. Bucket owners need not specify this parameter in - // their requests. This functionality is not supported for directory buckets. + // their requests. + // + // This functionality is not supported for directory buckets. RequestPayer types.RequestPayer // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. This - // functionality is not supported for directory buckets. + // listing after this specified key. StartAfter can be any key in the bucket. + // + // This functionality is not supported for directory buckets. StartAfter *string noSmithyDocumentSerde } func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -174,43 +214,57 @@ type ListObjectsV2Output struct { // All of the keys (up to 1,000) that share the same prefix are grouped together. // When counting the total numbers of returns by this API operation, this group of - // keys is considered as one item. A response can contain CommonPrefixes only if - // you specify a delimiter. CommonPrefixes contains all (if there are any) keys - // between Prefix and the next occurrence of the string specified by a delimiter. + // keys is considered as one item. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the next + // occurrence of the string specified by a delimiter. + // // CommonPrefixes lists keys that act like subdirectories in the directory - // specified by Prefix . 
For example, if the prefix is notes/ and the delimiter is - // a slash ( / ) as in notes/summer/july , the common prefix is notes/summer/ . All - // of the keys that roll up into a common prefix count as a single return when - // calculating the number of returns. + // specified by Prefix . + // + // For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in + // notes/summer/july , the common prefix is notes/summer/ . All of the keys that + // roll up into a common prefix count as a single return when calculating the + // number of returns. + // // - Directory buckets - For directory buckets, only prefixes that end in a // delimiter ( / ) are supported. + // // - Directory buckets - When you query ListObjectsV2 with a delimiter during // in-progress multipart uploads, the CommonPrefixes response parameter contains // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) - // in the Amazon S3 User Guide. + // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + // + // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html CommonPrefixes []types.CommonPrefix // Metadata about each object returned. Contents []types.Object - // If ContinuationToken was sent with the request, it is included in the response. - // You can use the returned ContinuationToken for pagination of the list response. - // You can use this ContinuationToken for pagination of the list results. + // If ContinuationToken was sent with the request, it is included in the + // response. You can use the returned ContinuationToken for pagination of the list + // response. You can use this ContinuationToken for pagination of the list + // results. ContinuationToken *string // Causes keys that contain the same string between the prefix and the first // occurrence of the delimiter to be rolled up into a single result element in the // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. Directory buckets - For directory buckets, / is the only - // supported delimiter. + // MaxKeys value. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string // Encoding type used by Amazon S3 to encode object key names in the XML response. + // // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: Delimiter, Prefix, Key, and StartAfter . + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter . EncodingType types.EncodingType // Set to false if all of the results were returned. Set to true if more keys are @@ -237,16 +291,21 @@ type ListObjectsV2Output struct { // obfuscated and is not a real key NextContinuationToken *string - // Keys that begin with the indicated prefix. Directory buckets - For directory - // buckets, only prefixes that end in a delimiter ( / ) are supported. + // Keys that begin with the indicated prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. 
This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If StartAfter was sent with the request, it is included in the response. This - // functionality is not supported for directory buckets. + // If StartAfter was sent with the request, it is included in the response. + // + // This functionality is not supported for directory buckets. StartAfter *string // Metadata pertaining to the operation's result. @@ -277,25 +336,25 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -313,6 +372,15 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { return err } @@ -322,7 +390,7 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil { @@ -349,20 +417,6 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o return nil } -func (v *ListObjectsV2Input) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. -type ListObjectsV2APIClient interface { - ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) -} - -var _ ListObjectsV2APIClient = (*Client)(nil) - // ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 type ListObjectsV2PaginatorOptions struct { // Sets the maximum number of keys returned in the response. By default, the @@ -428,6 +482,9 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O } params.MaxKeys = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListObjectsV2(ctx, ¶ms, optFns...) 
if err != nil { return nil, err @@ -450,6 +507,20 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O return result, nil } +func (v *ListObjectsV2Input) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. +type ListObjectsV2APIClient interface { + ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) +} + +var _ ListObjectsV2APIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go index 11b0d59b6d78..9ff3365815b9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go @@ -14,55 +14,79 @@ import ( "time" ) -// Lists the parts that have been uploaded for a specific multipart upload. To use -// this operation, you must provide the upload ID in the request. You obtain this -// uploadID by sending the initiate multipart upload request through -// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . The ListParts request returns a maximum of 1,000 uploaded parts. The limit of +// Lists the parts that have been uploaded for a specific multipart upload. +// +// To use this operation, you must provide the upload ID in the request. You +// obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload]. +// +// The ListParts request returns a maximum of 1,000 uploaded parts. The limit of // 1,000 parts is also the default value. You can restrict the number of parts in a // response by specifying the max-parts request parameter. If your multipart // upload consists of more than 1,000 parts, the response returns an IsTruncated // field with the value of true , and a NextPartNumberMarker element. To list // remaining uploaded parts, in subsequent ListParts requests, include the // part-number-marker query string parameter and set its value to the -// NextPartNumberMarker field value from the previous response. For more -// information on multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format +// NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. 
For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. If the upload was created using server-side -// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer -// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must -// have permission to the kms:Decrypt action for the ListParts request to -// succeed. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// +// If the upload was created using server-side encryption with Key Management +// +// Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon +// Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt +// action for the ListParts request to succeed. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following operations are related to ListParts : +// +// [CreateMultipartUpload] +// +// [UploadPart] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are -// related to ListParts : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [GetObjectAttributes] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { if params == nil { params = &ListPartsInput{} @@ -80,31 +104,39 @@ func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns . type ListPartsInput struct { - // The name of the bucket to which the parts are being uploaded. Directory buckets - // - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format + // The name of the bucket to which the parts are being uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -135,37 +167,46 @@ type ListPartsInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -177,17 +218,21 @@ type ListPartsOutput struct { // incomplete multipart uploads and the prefix in the lifecycle rule matches the // object name in the request, then the response includes this header indicating // when the initiated multipart upload will become eligible for abort operation. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // . The response will also include the x-amz-abort-rule-id header that will - // provide the ID of the lifecycle configuration rule that defines this action. + // For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + // // This functionality is not supported for directory buckets. 
+ // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. This functionality is not supported for directory - // buckets. + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not @@ -203,7 +248,7 @@ type ListPartsOutput struct { // provides the user ARN and display name. Initiator *types.Initiator - // Indicates whether the returned list of parts is truncated. A true value + // Indicates whether the returned list of parts is truncated. A true value // indicates that the list was truncated. A list can be truncated if the number of // parts exceeds the limit returned in the MaxParts element. IsTruncated *bool @@ -221,13 +266,14 @@ type ListPartsOutput struct { // Container element that identifies the object owner, after the object is // created. If multipart upload is initiated by an IAM user, this element provides - // the parent account ID and display name. Directory buckets - The bucket owner is - // returned as the object owner for all the parts. + // the parent account ID and display name. + // + // Directory buckets - The bucket owner is returned as the object owner for all + // the parts. Owner *types.Owner - // When a list is truncated, this element specifies the last part in the list, as - // well as the value to use for the part-number-marker request parameter in a - // subsequent request. + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. PartNumberMarker *string // Container for elements related to a particular part. A response can contain @@ -235,12 +281,15 @@ type ListPartsOutput struct { Parts []types.Part // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // The class of storage used to store the uploaded object. Directory buckets - - // Only the S3 Express One Zone storage class is supported by directory buckets to - // store objects. + // The class of storage used to store the uploaded object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass types.StorageClass // Upload ID identifying the multipart upload whose parts are being listed. 
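For reference, a minimal sketch (not part of the vendored change) of walking the parts of an in-progress multipart upload with the ListParts paginator touched below. Bucket, key, and upload ID are placeholders (the upload ID would come from CreateMultipartUpload), and Limit maps to the max-parts request parameter; NextPartNumberMarker is carried forward by the paginator itself.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	paginator := s3.NewListPartsPaginator(client, &s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),    // placeholder
		Key:      aws.String("example-object"),    // placeholder
		UploadId: aws.String("example-upload-id"), // placeholder from CreateMultipartUpload
	}, func(o *s3.ListPartsPaginatorOptions) {
		o.Limit = 100 // maps to the max-parts request parameter
	})

	// Each page returns at most Limit parts until IsTruncated is false.
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, part := range page.Parts {
			fmt.Printf("part %d: %d bytes\n", aws.ToInt32(part.PartNumber), aws.ToInt64(part.Size))
		}
	}
}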
@@ -274,25 +323,25 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -310,6 +359,15 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpListPartsValidationMiddleware(stack); err != nil { return err } @@ -319,7 +377,7 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addListPartsUpdateEndpoint(stack, options); err != nil { @@ -346,20 +404,6 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio return nil } -func (v *ListPartsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false - } - return *v.Bucket, true -} - -// ListPartsAPIClient is a client that implements the ListParts operation. -type ListPartsAPIClient interface { - ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) -} - -var _ ListPartsAPIClient = (*Client)(nil) - // ListPartsPaginatorOptions is the paginator options for ListParts type ListPartsPaginatorOptions struct { // Sets the maximum number of parts to return. @@ -423,6 +467,9 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio } params.MaxParts = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListParts(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -445,6 +492,20 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio return result, nil } +func (v *ListPartsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListPartsAPIClient is a client that implements the ListParts operation. 
+type ListPartsAPIClient interface { + ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) +} + +var _ ListPartsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go index 80344efb1cad..03da71d2e488 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -15,30 +15,45 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the accelerate -// configuration of an existing bucket. Amazon S3 Transfer Acceleration is a -// bucket-level feature that enables you to perform faster data transfers to Amazon -// S3. To use this operation, you must have permission to perform the +// This operation is not supported by directory buckets. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster data +// transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the // s3:PutAccelerateConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The Transfer Acceleration state of a bucket can be set to one of the following +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// The Transfer Acceleration state of a bucket can be set to one of the following // two values: +// // - Enabled – Enables accelerated data transfers to the bucket. +// // - Suspended – Disables accelerated data transfers to the bucket. // -// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// action returns the transfer acceleration state of a bucket. After setting the -// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty -// minutes before the data transfer rates to the bucket increase. The name of the -// bucket used for Transfer Acceleration must be DNS-compliant and must not contain -// periods ("."). For more information about transfer acceleration, see Transfer -// Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . 
The following operations are related to PutBucketAccelerateConfiguration : -// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it might +// take up to thirty minutes before the data transfer rates to the bucket increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant and +// must not contain periods ("."). +// +// For more information about transfer acceleration, see [Transfer Acceleration]. +// +// The following operations are related to PutBucketAccelerateConfiguration : +// +// [GetBucketAccelerateConfiguration] +// +// [CreateBucket] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +// [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { if params == nil { params = &PutBucketAccelerateConfigurationInput{} @@ -70,10 +85,13 @@ type PutBucketAccelerateConfigurationInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -85,6 +103,7 @@ type PutBucketAccelerateConfigurationInput struct { } func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -118,25 +137,25 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -154,6 +173,15 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } @@ -163,7 +191,7 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go index 6382d27691dc..35282558c4c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go @@ -15,89 +15,159 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the permissions on -// an existing bucket using access control lists (ACL). For more information, see -// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// . To set the ACL of a bucket, you must have the WRITE_ACP permission. You can -// use one of the following two ways to set a bucket's permissions: +// This operation is not supported by directory buckets. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the +// WRITE_ACP permission. 
+// +// You can use one of the following two ways to set a bucket's permissions: +// // - Specify the ACL in the request body +// // - Specify permissions using request headers // // You cannot specify access permission using both the body and the request -// headers. Depending on your application needs, you may choose to set the ACL on a -// bucket using either the request body or the headers. For example, if you have an +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an // existing application that updates a bucket ACL using the request body, then you -// can continue to use that approach. If your bucket uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. You must use policies to grant access to your bucket and the -// objects in it. Requests to set ACLs or update ACLs fail and return the -// AccessControlListNotSupported error code. Requests to read ACLs are still -// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Permissions You can set access permissions by using -// one of the following methods: +// can continue to use that approach. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies to +// grant access to your bucket and the objects in it. Requests to set ACLs or +// update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the +// Amazon S3 User Guide. +// +// Permissions You can set access permissions by using one of the following +// methods: +// // - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a // set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined // set of grantees and permissions. Specify the canned ACL name as the value of // x-amz-acl . If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . +// control-specific headers in your request. For more information, see [Canned ACL]. +// // - Specify access permissions explicitly with the x-amz-grant-read , // x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the // permission. If you use these ACL-specific headers, you cannot use the // x-amz-acl header to set a canned ACL. These parameters map to the set of -// permissions that Amazon S3 supports in an ACL. For more information, see -// Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// . 
You specify each grantee as a type=value pair, where the type is one of the -// following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-write header grants create, overwrite, and delete objects -// permission to LogDelivery group predefined by Amazon S3 and two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-write: -// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", -// id="555566667777" +// permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. +// +// You specify each grantee as a type=value pair, where the type is one of the +// +// following: +// +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// +// - uri – if you are granting permissions to a predefined group +// +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. +// +// For example, the following x-amz-grant-write header grants create, overwrite, +// +// and delete objects permission to LogDelivery group predefined by Amazon S3 and +// two Amazon Web Services accounts identified by their email addresses. +// +// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// +// id="111122223333", id="555566667777" // // You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// - By Email address: <>Grantees@email.com<>& The grantee is resolved to the -// CanonicalUser and, in a response to a GET Object acl request, appears as the -// CanonicalUser. Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. 
California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. +// cannot do both. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request +// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// - By Email address: +// +// <>Grantees@email.com<>& +// +// The grantee is resolved to the CanonicalUser and, in a response to a GET Object +// +// acl request, appears as the CanonicalUser. +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. // // The following operations are related to PutBucketAcl : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [GetObjectAcl] +// +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html +// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) { if params == nil { params = &PutBucketAclInput{} @@ -130,17 +200,23 @@ type PutBucketAclInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . 
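A sketch of the simplest path described in the PutBucketAcl comment above, a canned ACL set through the ACL member rather than explicit grant headers; the bucket name is a placeholder and the client is assumed to be constructed as in the earlier sketch:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// setBucketACLPrivate applies the "private" canned ACL. A canned ACL cannot be
// combined with the explicit x-amz-grant-* style members on the same request.
func setBucketACLPrivate(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder bucket name
        ACL:    types.BucketCannedACLPrivate,
    })
    return err
}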
For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The base64-encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -158,9 +234,10 @@ type PutBucketAclInput struct { // Allows grantee to read the bucket ACL. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. GrantWrite *string // Allows grantee to write the ACL for the applicable bucket. @@ -170,6 +247,7 @@ type PutBucketAclInput struct { } func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -203,25 +281,25 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -239,6 +317,15 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { return err } @@ -248,7 +335,7 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = 
awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go index 9b1009f65153..9d0908613d1e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -14,45 +14,67 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets an analytics -// configuration for the bucket (specified by the analytics configuration ID). You -// can have up to 1,000 analytics configurations per bucket. You can choose to have -// storage class analysis export analysis reports sent to a comma-separated values -// (CSV) flat file. See the DataExport request element. Reports are updated daily -// and are based on the object filters that you configure. When selecting data -// export, you specify a destination bucket and an optional destination prefix -// where the file is written. You can export the data to a destination bucket in a -// different account. However, the destination bucket must be in the same Region as -// the bucket that you are making the PUT analytics configuration to. For more -// information, see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . You must create a bucket policy on the destination bucket where the exported +// This operation is not supported by directory buckets. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per bucket. +// +// You can choose to have storage class analysis export analysis reports sent to a +// comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you +// configure. When selecting data export, you specify a destination bucket and an +// optional destination prefix where the file is written. You can export the data +// to a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// You must create a bucket policy on the destination bucket where the exported // file is written to grant permissions to Amazon S3 to write objects to the -// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory -// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// . To use this operation, you must have permissions to perform the +// bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. +// +// To use this operation, you must have permissions to perform the // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. 
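A sketch of one storage class analysis configuration with a CSV export, as the PutBucketAnalyticsConfiguration comment above describes; the configuration ID, source bucket, and destination ARN are placeholders, and the destination must sit in the same Region as the source:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putAnalyticsConfig exports storage class analysis for one bucket as a CSV report.
func putAnalyticsConfig(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketAnalyticsConfiguration(ctx, &s3.PutBucketAnalyticsConfigurationInput{
        Bucket: aws.String("amzn-s3-demo-bucket"),      // placeholder source bucket
        Id:     aws.String("storage-class-analysis-1"), // placeholder configuration ID
        AnalyticsConfiguration: &types.AnalyticsConfiguration{
            Id: aws.String("storage-class-analysis-1"),
            StorageClassAnalysis: &types.StorageClassAnalysis{
                DataExport: &types.StorageClassAnalysisDataExport{
                    OutputSchemaVersion: types.StorageClassAnalysisSchemaVersionV1,
                    Destination: &types.AnalyticsExportDestination{
                        S3BucketDestination: &types.AnalyticsS3BucketDestination{
                            // Placeholder destination ARN; the destination bucket needs a
                            // bucket policy that lets Amazon S3 write the exported report.
                            Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination-bucket"),
                            Format: types.AnalyticsS3ExportFileFormatCsv,
                        },
                    },
                },
            },
        },
    })
    return err
}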
For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketAnalyticsConfiguration has the following special errors: +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// PutBucketAnalyticsConfiguration has the following special errors: +// // - HTTP Error: HTTP 400 Bad Request +// // - Code: InvalidArgument +// // - Cause: Invalid argument. +// // - HTTP Error: HTTP 400 Bad Request +// // - Code: TooManyConfigurations +// // - Cause: You are attempting to create a new configuration but have already // reached the 1,000-configuration limit. +// // - HTTP Error: HTTP 403 Forbidden +// // - Code: AccessDenied +// // - Cause: You are not the owner of the specified bucket, or you do not have // the s3:PutAnalyticsConfiguration bucket permission to set the configuration on // the bucket. // // The following operations are related to PutBucketAnalyticsConfiguration : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// [GetBucketAnalyticsConfiguration] +// +// [DeleteBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &PutBucketAnalyticsConfigurationInput{} @@ -94,6 +116,7 @@ type PutBucketAnalyticsConfigurationInput struct { } func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -127,25 +150,25 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err 
!= nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -163,6 +186,15 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -172,7 +204,7 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go index 394a2bad8bfd..6ce434d6f706 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go @@ -15,35 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the cors -// configuration for your bucket. If the configuration exists, Amazon S3 replaces -// it. To use this operation, you must be allowed to perform the s3:PutBucketCORS +// This operation is not supported by directory buckets. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS // action. By default, the bucket owner has this permission and can grant it to -// others. You set this configuration on a bucket so that the bucket can service +// others. +// +// You set this configuration on a bucket so that the bucket can service // cross-origin requests. For example, you might want to enable a request whose // origin is http://www.example.com to access your Amazon S3 bucket at -// my.example.bucket.com by using the browser's XMLHttpRequest capability. To -// enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// my.example.bucket.com by using the browser's XMLHttpRequest capability. 
+// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors // subresource to the bucket. The cors subresource is an XML document in which you // configure rules that identify origins and the HTTP methods that can be executed -// on your bucket. The document is limited to 64 KB in size. When Amazon S3 -// receives a cross-origin request (or a pre-flight OPTIONS request) against a -// bucket, it evaluates the cors configuration on the bucket and uses the first -// CORSRule rule that matches the incoming browser request to enable a cross-origin -// request. For a rule to match, the following conditions must be met: +// on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS +// request) against a bucket, it evaluates the cors configuration on the bucket +// and uses the first CORSRule rule that matches the incoming browser request to +// enable a cross-origin request. For a rule to match, the following conditions +// must be met: +// // - The request's Origin header must match AllowedOrigin elements. +// // - The request method (for example, GET, PUT, HEAD, and so on) or the // Access-Control-Request-Method header in case of a pre-flight OPTIONS request // must be one of the AllowedMethod elements. +// // - Every header specified in the Access-Control-Request-Headers request header // of a pre-flight request must match an AllowedHeader element. // -// For more information about CORS, go to Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. The following operations are related to -// PutBucketCors : -// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. +// +// The following operations are related to PutBucketCors : +// +// [GetBucketCors] +// +// [DeleteBucketCors] +// +// [RESTOPTIONSobject] +// +// [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html +// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { if params == nil { params = &PutBucketCorsInput{} @@ -67,8 +86,9 @@ type PutBucketCorsInput struct { Bucket *string // Describes the cross-origin access configuration for objects in an Amazon S3 - // bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) - // in the Amazon S3 User Guide. + // bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. + // + // [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html // // This member is required. CORSConfiguration *types.CORSConfiguration @@ -77,17 +97,23 @@ type PutBucketCorsInput struct { // the SDK. 
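A sketch of a single CORS rule for the http://www.example.com scenario in the PutBucketCors comment above; the bucket name and rule contents are illustrative placeholders:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putCORSRule allows cross-origin GET and PUT requests from a single origin.
func putCORSRule(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder bucket name
        CORSConfiguration: &types.CORSConfiguration{
            CORSRules: []types.CORSRule{
                {
                    AllowedOrigins: []string{"http://www.example.com"},
                    AllowedMethods: []string{"GET", "PUT"},
                    AllowedHeaders: []string{"*"},
                },
            },
        },
    })
    return err
}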
This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The base64-encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -99,6 +125,7 @@ type PutBucketCorsInput struct { } func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -132,25 +159,25 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -168,6 +195,15 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -177,7 +213,7 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = 
addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go index 615e98a3d6ca..187e73f2484d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go @@ -15,30 +15,41 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This action uses the -// encryption subresource to configure default encryption and Amazon S3 Bucket Keys -// for an existing bucket. By default, all buckets have a default encryption -// configuration that uses server-side encryption with Amazon S3 managed keys -// (SSE-S3). You can optionally configure default encryption for a bucket by using -// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or -// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). -// If you specify default encryption by using SSE-KMS, you can also configure -// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does -// not validate the KMS key ID provided in PutBucketEncryption requests. This -// action requires Amazon Web Services Signature Version 4. For more information, -// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// . To use this operation, you must have permission to perform the +// This operation is not supported by directory buckets. +// +// This action uses the encryption subresource to configure default encryption and +// Amazon S3 Bucket Keys for an existing bucket. +// +// By default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally +// configure default encryption for a bucket by using server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption +// by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys]. If you use PutBucketEncryption to +// set your [default bucket encryption]to SSE-KMS, you should verify that your KMS key ID is correct. Amazon +// S3 does not validate the KMS key ID provided in PutBucketEncryption requests. +// +// This action requires Amazon Web Services Signature Version 4. For more +// information, see [Authenticating Requests (Amazon Web Services Signature Version 4)]. +// +// To use this operation, you must have permission to perform the // s3:PutEncryptionConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. 
For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. The following operations are related to -// PutBucketEncryption : -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// The following operations are related to PutBucketEncryption : +// +// [GetBucketEncryption] +// +// [DeleteBucketEncryption] +// +// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html +// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html +// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { if params == nil { params = &PutBucketEncryptionInput{} @@ -62,8 +73,9 @@ type PutBucketEncryptionInput struct { // (SSE-S3). You can optionally configure default encryption for a bucket by using // server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a // customer-provided key (SSE-C). For information about the bucket default - // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon S3 User Guide. + // encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. + // + // [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html // // This member is required. Bucket *string @@ -77,16 +89,20 @@ type PutBucketEncryptionInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . 
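A sketch of default SSE-KMS bucket encryption with an S3 Bucket Key, matching the PutBucketEncryption comment above; the bucket name and KMS key ARN are placeholders, and as the comment notes Amazon S3 does not validate the key ID in this request:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putDefaultKMSEncryption sets SSE-KMS as the bucket default and enables a Bucket Key.
func putDefaultKMSEncryption(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder bucket name
        ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
            Rules: []types.ServerSideEncryptionRule{
                {
                    ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
                        SSEAlgorithm: types.ServerSideEncryptionAwsKms,
                        // Placeholder key ARN; verify the key ID yourself, S3 will not.
                        KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"),
                    },
                    BucketKeyEnabled: aws.Bool(true),
                },
            },
        },
    })
    return err
}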
For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The base64-encoded 128-bit MD5 digest of the server-side encryption - // configuration. For requests made using the Amazon Web Services Command Line - // Interface (CLI) or Amazon Web Services SDKs, this field is calculated - // automatically. + // configuration. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -98,6 +114,7 @@ type PutBucketEncryptionInput struct { } func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -131,25 +148,25 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -167,6 +184,15 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -176,7 +202,7 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go index a5f8fc7ad445..5087cdee9522 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go @@ -14,37 +14,58 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Puts a S3 -// Intelligent-Tiering configuration to the specified bucket. You can have up to -// 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported by directory buckets. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can +// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to PutBucketIntelligentTieringConfiguration include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] // // You only need S3 Intelligent-Tiering enabled on a bucket if you want to // automatically move objects stored in the S3 Intelligent-Tiering storage class to // the Archive Access or Deep Archive Access tier. 
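A sketch of one Intelligent-Tiering configuration that opts objects into the Archive Access tier after 90 days without access, which is the only case where this call is needed per the note above; the bucket name and configuration ID are placeholders:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putIntelligentTiering archives Intelligent-Tiering objects after 90 days without access.
func putIntelligentTiering(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketIntelligentTieringConfiguration(ctx, &s3.PutBucketIntelligentTieringConfigurationInput{
        Bucket: aws.String("amzn-s3-demo-bucket"),   // placeholder bucket name
        Id:     aws.String("archive-after-90-days"), // placeholder configuration ID
        IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
            Id:     aws.String("archive-after-90-days"),
            Status: types.IntelligentTieringStatusEnabled,
            Tierings: []types.Tiering{
                {
                    AccessTier: types.IntelligentTieringAccessTierArchiveAccess,
                    Days:       aws.Int32(90),
                },
            },
        },
    })
    return err
}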
-// PutBucketIntelligentTieringConfiguration has the following special errors: HTTP -// 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument HTTP 400 Bad -// Request Error Code: TooManyConfigurations Cause: You are attempting to create a -// new configuration but have already reached the 1,000-configuration limit. HTTP -// 403 Forbidden Error Cause: You are not the owner of the specified bucket, or you -// do not have the s3:PutIntelligentTieringConfiguration bucket permission to set -// the configuration on the bucket. +// +// PutBucketIntelligentTieringConfiguration has the following special errors: +// +// HTTP 400 Bad Request Error Code: InvalidArgument +// +// Cause: Invalid Argument +// +// HTTP 400 Bad Request Error Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, +// or you do not have the s3:PutIntelligentTieringConfiguration bucket permission +// to set the configuration on the bucket. +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &PutBucketIntelligentTieringConfigurationInput{} @@ -82,6 +103,7 @@ type PutBucketIntelligentTieringConfigurationInput struct { } func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -115,25 +137,25 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -151,6 +173,15 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares 
if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -160,7 +191,7 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go index e2d06796a486..b15df17a15bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -14,48 +14,76 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the PUT action adds an inventory configuration (identified by the inventory ID) -// to the bucket. You can have up to 1,000 inventory configurations per bucket. +// This operation is not supported by directory buckets. +// +// This implementation of the PUT action adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// // Amazon S3 inventory generates inventories of the objects in the bucket on a // daily or weekly basis, and the results are published to a flat file. The bucket // that is inventoried is called the source bucket, and the bucket where the // inventory flat file is stored is called the destination bucket. The destination -// bucket must be in the same Amazon Web Services Region as the source bucket. When -// you configure an inventory for a source bucket, you specify the destination -// bucket where you want the inventory to be stored, and whether to generate the -// inventory daily or weekly. You can also configure what object metadata to -// include and whether to inventory all object versions or only current versions. -// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon S3 User Guide. You must create a bucket policy on the destination -// bucket to grant permissions to Amazon S3 to write objects to the bucket in the -// defined location. For an example policy, see Granting Permissions for Amazon S3 -// Inventory and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// . Permissions To use this operation, you must have permission to perform the +// bucket must be in the same Amazon Web Services Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the +// destination bucket where you want the inventory to be stored, and whether to +// generate the inventory daily or weekly. 
You can also configure what object +// metadata to include and whether to inventory all object versions or only current +// versions. For more information, see [Amazon S3 Inventory]in the Amazon S3 User Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For an +// example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. +// +// Permissions To use this operation, you must have permission to perform the // s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. The -// s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) -// report that includes all object metadata fields available and to specify the +// default and can grant this permission to others. +// +// The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report +// that includes all object metadata fields available and to specify the // destination bucket to store the inventory. A user with read access to objects in // the destination bucket can also access all object metadata fields that are -// available in the inventory report. To restrict access to an inventory report, -// see Restricting access to an Amazon S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) -// in the Amazon S3 User Guide. For more information about the metadata fields -// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) -// in the Amazon S3 User Guide. For more information about permissions, see -// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. PutBucketInventoryConfiguration has the following -// special errors: HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid -// Argument HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. HTTP 403 Forbidden Error Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket. The following -// operations are related to PutBucketInventoryConfiguration : -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// available in the inventory report. +// +// To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide. 
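A sketch of a weekly CSV inventory of current object versions delivered to a separate bucket, per the PutBucketInventoryConfiguration description above; the source bucket, destination ARN, and configuration ID are placeholders:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putWeeklyInventory publishes a weekly inventory of the source bucket to a destination bucket.
func putWeeklyInventory(ctx context.Context, client *s3.Client) error {
    _, err := client.PutBucketInventoryConfiguration(ctx, &s3.PutBucketInventoryConfigurationInput{
        Bucket: aws.String("amzn-s3-demo-source-bucket"), // placeholder source bucket
        Id:     aws.String("weekly-inventory"),           // placeholder configuration ID
        InventoryConfiguration: &types.InventoryConfiguration{
            Id:                     aws.String("weekly-inventory"),
            IsEnabled:              aws.Bool(true),
            IncludedObjectVersions: types.InventoryIncludedObjectVersionsCurrent,
            Schedule: &types.InventorySchedule{
                Frequency: types.InventoryFrequencyWeekly,
            },
            Destination: &types.InventoryDestination{
                S3BucketDestination: &types.InventoryS3BucketDestination{
                    // Placeholder destination ARN; it must be in the same Region and carry
                    // a bucket policy that lets Amazon S3 write the inventory report.
                    Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination-bucket"),
                    Format: types.InventoryFormatCsv,
                },
            },
        },
    })
    return err
}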
+// For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists] +// in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations]and [Identity and access management in Amazon S3] +// in the Amazon S3 User Guide. +// +// PutBucketInventoryConfiguration has the following special errors: +// +// HTTP 400 Bad Request Error Code: InvalidArgument +// +// Cause: Invalid Argument +// +// HTTP 400 Bad Request Error Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, +// or you do not have the s3:PutInventoryConfiguration bucket permission to set +// the configuration on the bucket. +// +// The following operations are related to PutBucketInventoryConfiguration : +// +// [GetBucketInventoryConfiguration] +// +// [DeleteBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html +// [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10 +// [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { if params == nil { params = &PutBucketInventoryConfigurationInput{} @@ -97,6 +125,7 @@ type PutBucketInventoryConfigurationInput struct { } func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -130,25 +159,25 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - 
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -166,6 +195,15 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -175,7 +213,7 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go index ac2b63ebb2b4..5f20c6d566c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -15,25 +15,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates a new lifecycle -// configuration for the bucket or replaces an existing lifecycle configuration. -// Keep in mind that this will overwrite an existing lifecycle configuration, so if -// you want to retain any configuration details, they must be included in the new -// lifecycle configuration. For information about lifecycle configuration, see -// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// . Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The previous version of the -// API supported filtering based only on an object key name prefix, which is -// supported for backward compatibility. For the related API description, see -// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// . Rules You specify the lifecycle configuration in your request body. The +// This operation is not supported by directory buckets. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. 
For information about +// lifecycle configuration, see [Managing your storage lifecycle]. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any +// combination of these. Accordingly, this section describes the latest API. The +// previous version of the API supported filtering based only on an object key name +// prefix, which is supported for backward compatibility. For the related API +// description, see [PutBucketLifecycle]. +// +// Rules You specify the lifecycle configuration in your request body. The // lifecycle configuration is specified as XML consisting of one or more rules. An // Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not // adjustable. Each rule consists of the following: // // - A filter identifying a subset of objects to which the rule applies. The -// filter can be based on a key name prefix, object tags, or a combination of both. +// filter can be based on a key name prefix, object tags, object size, or any +// combination of these. // // - A status indicating whether the rule is in effect. // @@ -44,28 +48,44 @@ import ( // versions). Amazon S3 provides predefined actions that you can specify for // current and noncurrent object versions. // -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html) -// . Permissions By default, all Amazon S3 resources are private, including -// buckets, objects, and related subresources (for example, lifecycle configuration -// and website configuration). Only the resource owner (that is, the Amazon Web +// For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements]. +// +// Permissions By default, all Amazon S3 resources are private, including buckets, +// objects, and related subresources (for example, lifecycle configuration and +// website configuration). Only the resource owner (that is, the Amazon Web // Services account that created it) can access the resource. The resource owner // can optionally grant access permissions to others by writing an access policy. -// For this operation, a user must get the s3:PutLifecycleConfiguration -// permission. You can also explicitly deny permissions. An explicit deny also -// supersedes any other permissions. If you want to block users or accounts from -// removing or deleting objects from your bucket, you must deny them permissions -// for the following actions: +// For this operation, a user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. An explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// // - s3:DeleteObject +// // - s3:DeleteObjectVersion +// // - s3:PutLifecycleConfiguration // -// For more information about permissions, see Managing Access Permissions to Your -// Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . 
The following operations are related to PutBucketLifecycleConfiguration : -// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. +// +// The following operations are related to PutBucketLifecycleConfiguration : +// +// [Examples of Lifecycle Configuration] +// +// [GetBucketLifecycleConfiguration] +// +// [DeleteBucketLifecycle] +// +// [Examples of Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { if params == nil { params = &PutBucketLifecycleConfigurationInput{} @@ -92,10 +112,13 @@ type PutBucketLifecycleConfigurationInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -110,6 +133,7 @@ type PutBucketLifecycleConfigurationInput struct { } func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -143,25 +167,25 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -179,6 +203,15 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -188,7 +221,7 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go index e69fa24c70e9..cf2183d0eb60 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go @@ -15,39 +15,68 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Set the logging -// parameters for a bucket and to specify permissions for who can view and modify -// the logging parameters. All logs are saved to buckets in the same Amazon Web -// Services Region as the source bucket. To set the logging status of a bucket, you -// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL -// to all logs. You use the Grantee request element to grant access to other -// people. The Permissions request element specifies the kind of access the -// grantee has to the logs. If the target bucket for log delivery uses the bucket -// owner enforced setting for S3 Object Ownership, you can't use the Grantee -// request element to grant access to others. 
Permissions can only be granted using -// policies. For more information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) -// to whom you're assigning access rights (by using request elements) in the -// following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. -// - By Email address: <>Grantees@email.com<> The grantee is resolved to the -// CanonicalUser and, in a response to a GETObjectAcl request, appears as the -// CanonicalUser. -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// This operation is not supported by directory buckets. +// +// Set the logging parameters for a bucket and to specify permissions for who can +// view and modify the logging parameters. All logs are saved to buckets in the +// same Amazon Web Services Region as the source bucket. To set the logging status +// of a bucket, you must be the bucket owner. +// +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use the +// Grantee request element to grant access to other people. The Permissions +// request element specifies the kind of access the grantee has to the logs. +// +// If the target bucket for log delivery uses the bucket owner enforced setting +// for S3 Object Ownership, you can't use the Grantee request element to grant +// access to others. Permissions can only be granted using policies. For more +// information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (by using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request. +// +// - By Email address: +// +// <>Grantees@email.com<> +// +// The grantee is resolved to the CanonicalUser and, in a response to a +// +// GETObjectAcl request, appears as the CanonicalUser. +// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // // To enable logging, you use LoggingEnabled and its children request elements. To -// disable logging, you use an empty BucketLoggingStatus request element: For -// more information about server access logging, see Server Access Logging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) -// in the Amazon S3 User Guide. For more information about creating a bucket, see -// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . For more information about returning the logging status of a bucket, see -// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) -// . 
The following operations are related to PutBucketLogging : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +// disable logging, you use an empty BucketLoggingStatus request element: +// +// For more information about server access logging, see [Server Access Logging] in the Amazon S3 User +// Guide. +// +// For more information about creating a bucket, see [CreateBucket]. For more information about +// returning the logging status of a bucket, see [GetBucketLogging]. +// +// The following operations are related to PutBucketLogging : +// +// [PutObject] +// +// [DeleteBucket] +// +// [CreateBucket] +// +// [GetBucketLogging] +// +// [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { if params == nil { params = &PutBucketLoggingInput{} @@ -79,15 +108,19 @@ type PutBucketLoggingInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash of the PutBucketLogging request body. For requests made using the - // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, - // this field is calculated automatically. + // The MD5 hash of the PutBucketLogging request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
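As a rough illustration of the PutBucketLogging call documented in this hunk, the sketch below enables server access logging from one bucket to another. It is only a sketch: the bucket names and log prefix are placeholders, and the client setup assumes default credential and region resolution.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	// Resolve credentials and region from the environment / shared config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Deliver access logs for the source bucket to a target bucket under "logs/".
	_, err = client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"), // placeholder name
		BucketLoggingStatus: &types.BucketLoggingStatus{
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"), // placeholder name
				TargetPrefix: aws.String("logs/"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}

Sending the same request with an empty BucketLoggingStatus (no LoggingEnabled child) turns logging off again, matching the note above about the empty request element.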
If the account ID that you provide @@ -99,6 +132,7 @@ type PutBucketLoggingInput struct { } func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -132,25 +166,25 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -168,6 +202,15 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -177,7 +220,7 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go index 099736c11e99..837e7180cc49 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -14,29 +14,44 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets a metrics -// configuration (specified by the metrics configuration ID) for the bucket. You -// can have up to 1,000 metrics configurations per bucket. If you're updating an -// existing metrics configuration, note that this is a full replacement of the -// existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. To use this operation, you must have permissions to -// perform the s3:PutMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. 
For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to PutBucketMetricsConfiguration : -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// This operation is not supported by directory buckets. +// +// Sets a metrics configuration (specified by the metrics configuration ID) for +// the bucket. You can have up to 1,000 metrics configurations per bucket. If +// you're updating an existing metrics configuration, note that this is a full +// replacement of the existing metrics configuration. If you don't include the +// elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the +// s3:PutMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to PutBucketMetricsConfiguration : +// +// [DeleteBucketMetricsConfiguration] +// +// [GetBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] // // PutBucketMetricsConfiguration has the following special error: +// // - Error code: TooManyConfigurations +// // - Description: You are attempting to create a new configuration but have // already reached the 1,000-configuration limit. 
+// // - HTTP Status Code: HTTP 400 Bad Request +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { if params == nil { params = &PutBucketMetricsConfigurationInput{} @@ -79,6 +94,7 @@ type PutBucketMetricsConfigurationInput struct { } func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,25 +128,25 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -148,6 +164,15 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -157,7 +182,7 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go index 7139f6ea6a84..d6d4e5109642 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -14,41 +14,59 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Enables notifications of -// specified events for a bucket. For more information about event notifications, -// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . Using this API, you can replace an existing notification configuration. The +// This operation is not supported by directory buckets. +// +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see [Configuring Event Notifications]. +// +// Using this API, you can replace an existing notification configuration. The // configuration is an XML file that defines the event types that you want Amazon // S3 to publish and the destination where you want Amazon S3 to publish an event -// notification when it detects an event of the specified type. By default, your -// bucket has no event notifications configured. That is, the notification -// configuration will be an empty NotificationConfiguration . This action -// replaces the existing notification configuration with the configuration you -// include in the request body. After Amazon S3 receives this request, it first -// verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon -// Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner -// has permission to publish to it by sending a test notification. In the case of -// Lambda destinations, Amazon S3 verifies that the Lambda function permissions -// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For -// more information, see Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . You can disable notifications by adding the empty NotificationConfiguration -// element. For more information about the number of event notification -// configurations that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) -// in Amazon Web Services General Reference. By default, only the bucket owner can -// configure notifications on a bucket. However, bucket owners can use a bucket -// policy to grant permission to other users to set this configuration with the -// required s3:PutBucketNotification permission. The PUT notification is an atomic -// operation. For example, suppose your notification configuration includes SNS -// topic, SQS queue, and Lambda function configurations. When you send a PUT -// request with this configuration, Amazon S3 sends test messages to your SNS -// topic. If the message fails, the entire PUT action will fail, and Amazon S3 will -// not add the configuration to your bucket. If the configuration in the request -// body includes only one TopicConfiguration specifying only the -// s3:ReducedRedundancyLostObject event type, the response will also include the -// x-amz-sns-test-message-id header containing the message ID of the test -// notification sent to the topic. 
The following action is related to -// PutBucketNotificationConfiguration : -// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. That is, the +// notification configuration will be an empty NotificationConfiguration . +// +// This action replaces the existing notification configuration with the +// configuration you include in the request body. +// +// After Amazon S3 receives this request, it first verifies that any Amazon Simple +// Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) +// destination exists, and that the bucket owner has permission to publish to it by +// sending a test notification. In the case of Lambda destinations, Amazon S3 +// verifies that the Lambda function permissions grant Amazon S3 permission to +// invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events]. +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// For more information about the number of event notification configurations that +// you can create per bucket, see [Amazon S3 service quotas]in Amazon Web Services General Reference. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with the required s3:PutBucketNotification +// permission. +// +// The PUT notification is an atomic operation. For example, suppose your +// notification configuration includes SNS topic, SQS queue, and Lambda function +// configurations. When you send a PUT request with this configuration, Amazon S3 +// sends test messages to your SNS topic. If the message fails, the entire PUT +// action will fail, and Amazon S3 will not add the configuration to your bucket. +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. 
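The note above about sending an empty NotificationConfiguration to disable notifications translates into a very small SDK call. A minimal sketch follows; it assumes an already-constructed *s3.Client, and the bucket name is a placeholder.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// disableBucketNotifications replaces the bucket's notification configuration
// with an empty one, which removes all configured event notifications.
func disableBucketNotifications(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket:                    aws.String("example-bucket"), // placeholder name
		NotificationConfiguration: &types.NotificationConfiguration{},
	})
	return err
}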
+// +// The following action is related to PutBucketNotificationConfiguration : +// +// [GetBucketNotificationConfiguration] +// +// [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3 +// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html +// [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { if params == nil { params = &PutBucketNotificationConfigurationInput{} @@ -90,6 +108,7 @@ type PutBucketNotificationConfigurationInput struct { } func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -123,25 +142,25 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -159,6 +178,15 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } @@ -168,7 +196,7 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go index f89f86d65edd..99e26fd558b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go @@ -15,14 +15,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates or modifies -// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -// the s3:PutBucketOwnershipControls permission. For more information about Amazon -// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using object ownership (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html) -// . The following operations are related to PutBucketOwnershipControls : -// - GetBucketOwnershipControls -// - DeleteBucketOwnershipControls +// This operation is not supported by directory buckets. +// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For more +// information about Amazon S3 permissions, see [Specifying permissions in a policy]. +// +// For information about Amazon S3 Object Ownership, see [Using object ownership]. +// +// The following operations are related to PutBucketOwnershipControls : +// +// # GetBucketOwnershipControls +// +// # DeleteBucketOwnershipControls +// +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html +// [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { if params == nil { params = &PutBucketOwnershipControlsInput{} @@ -51,9 +59,10 @@ type PutBucketOwnershipControlsInput struct { // This member is required. OwnershipControls *types.OwnershipControls - // The MD5 hash of the OwnershipControls request body. For requests made using the - // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, - // this field is calculated automatically. + // The MD5 hash of the OwnershipControls request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
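For orientation, a minimal sketch of the PutBucketOwnershipControls call covered by this hunk, applying the bucket owner enforced setting. The bucket name is a placeholder and the helper function name is invented for the example.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enforceBucketOwnerOwnership applies the BucketOwnerEnforced setting, which
// disables ACLs so that the bucket owner owns every object in the bucket.
func enforceBucketOwnerOwnership(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"), // placeholder name
		OwnershipControls: &types.OwnershipControls{
			Rules: []types.OwnershipControlsRule{
				{ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced},
			},
		},
	})
	return err
}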
If the account ID that you provide @@ -65,6 +74,7 @@ type PutBucketOwnershipControlsInput struct { } func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -98,25 +108,25 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -134,6 +144,15 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -143,7 +162,7 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go index b3da186ac83c..93f66ab6196f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go @@ -15,48 +15,64 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets - -// For directory buckets, you must make requests for this API operation to the -// Regional endpoint. These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. 
Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the PutBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns a -// 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see [Regional and Zonal endpoints]in +// the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the PutBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:PutBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:PutBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. 
For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . // -// Example bucket policies General purpose buckets example bucket policies - See -// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// in the Amazon S3 User Guide. Directory bucket example bucket policies - See -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . The -// following operations are related to PutBucketPolicy : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// The following operations are related to PutBucketPolicy : +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { if params == nil { params = &PutBucketPolicyInput{} @@ -74,21 +90,26 @@ func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInp type PutBucketPolicyInput struct { - // The name of the bucket. Directory buckets - When you use this operation with a - // directory bucket, you must use path-style requests in the format + // The name of the bucket. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format // https://s3express-control.region_code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be // unique in the chosen Availability Zone. 
Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 + // ). For information about bucket naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User + // Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string - // The bucket policy as a JSON document. For directory buckets, the only IAM - // action supported in the bucket policy is s3express:CreateSession . + // The bucket policy as a JSON document. + // + // For directory buckets, the only IAM action supported in the bucket policy is + // s3express:CreateSession . // // This member is required. Policy *string @@ -97,45 +118,61 @@ type PutBucketPolicyInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . + // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . For directory buckets, when you - // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's - // used for performance. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm + // parameter and uses the checksum algorithm that matches the provided value in + // x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. This functionality is not supported - // for directory buckets. + // to change this bucket policy in the future. + // + // This functionality is not supported for directory buckets. 
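To make the PutBucketPolicy request shape above concrete, a small sketch follows. The bucket name, account ID, and policy statement are illustrative placeholders only, and the helper function name is invented for the example.

package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// attachReadPolicy grants another account read access to objects in the bucket.
// The account ID, bucket name, and statement are placeholders for illustration.
func attachReadPolicy(ctx context.Context, client *s3.Client) error {
	policy := `{
  "Version": "2012-10-17",
  "Statement": [{
    "Sid": "AllowCrossAccountRead",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
    "Action": "s3:GetObject",
    "Resource": "arn:aws:s3:::example-bucket/*"
  }]
}`
	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder name
		Policy: aws.String(policy),
	})
	return err
}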
ConfirmRemoveSelfBucketAccess *bool - // The MD5 hash of the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. This functionality is not supported for directory - // buckets. + // The MD5 hash of the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -169,25 +206,25 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -205,6 +242,15 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -214,7 +260,7 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go index ddf58ad88954..2f9d26774f36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go @@ -15,47 +15,71 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates a replication -// configuration or replaces an existing one. For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. Specify the replication configuration in the -// request body. In the replication configuration, you provide the name of the -// destination bucket or buckets where you want Amazon S3 to replicate objects, the -// IAM role that Amazon S3 can assume to replicate objects on your behalf, and -// other relevant information. You can invoke this request for a specific Amazon -// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) -// condition key. A replication configuration must include at least one rule, and -// can contain a maximum of 1,000. Each rule identifies a subset of objects to -// replicate by filtering the objects in the source bucket. To choose additional -// subsets of objects to replicate, add a rule for each subset. To specify a subset -// of the objects in the source bucket to apply a replication rule to, add the -// Filter element as a child of the Rule element. You can filter objects based on -// an object key prefix, one or more object tags, or both. When you add the Filter -// element in the configuration, you must also add the following elements: -// DeleteMarkerReplication , Status , and Priority . If you are using an earlier -// version of the replication configuration, Amazon S3 handles replication of -// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) -// . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) -// . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't +// This operation is not supported by directory buckets. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see [Replication]in the Amazon S3 User Guide. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets where +// you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume +// to replicate objects on your behalf, and other relevant information. You can +// invoke this request for a specific Amazon Web Services Region by using the [aws:RequestedRegion] +// aws:RequestedRegion condition key. +// +// A replication configuration must include at least one rule, and can contain a +// maximum of 1,000. Each rule identifies a subset of objects to replicate by +// filtering the objects in the source bucket. To choose additional subsets of +// objects to replicate, add a rule for each subset. 
+// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. When +// you add the Filter element in the configuration, you must also add the following +// elements: DeleteMarkerReplication , Status , and Priority . +// +// If you are using an earlier version of the replication configuration, Amazon S3 +// handles replication of delete markers differently. For more information, see [Backward Compatibility]. +// +// For information about enabling versioning on a bucket, see [Using Versioning]. +// +// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't // replicate objects that are stored at rest using server-side encryption with KMS // keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: // SourceSelectionCriteria , SseKmsEncryptedObjects , Status , // EncryptionConfiguration , and ReplicaKmsKeyID . For information about -// replication configuration, see Replicating Objects Created with SSE Using KMS -// keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html) -// . For information on PutBucketReplication errors, see List of -// replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// replication configuration, see [Replicating Objects Created with SSE Using KMS keys]. +// +// For information on PutBucketReplication errors, see [List of replication-related error codes] +// // Permissions To create a PutBucketReplication request, you must have -// s3:PutReplicationConfiguration permissions for the bucket. By default, a -// resource owner, in this case the Amazon Web Services account that created the -// bucket, can perform this operation. The resource owner can also grant others -// permissions to perform the operation. For more information about permissions, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . To perform this operation, the user or role performing the action must have -// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) -// permission. The following operations are related to PutBucketReplication : -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// s3:PutReplicationConfiguration permissions for the bucket. +// +// By default, a resource owner, in this case the Amazon Web Services account that +// created the bucket, can perform this operation. The resource owner can also +// grant others permissions to perform the operation. For more information about +// permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// To perform this operation, the user or role performing the action must have the [iam:PassRole] +// permission. 
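For orientation, a minimal sketch of calling this operation through the updated SDK might look like the following. The bucket names, role ARN, priority, and rule values are placeholders, and the rule's Filter element is omitted because its Go representation differs between SDK releases; a real configuration must include a Filter (or the legacy Prefix) together with the elements described above.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder bucket and role values; replace with real resources.
	_, err = client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String("SOURCE-BUCKET"),
		ReplicationConfiguration: &types.ReplicationConfiguration{
			// IAM role that Amazon S3 assumes to replicate objects on your behalf.
			Role: aws.String("arn:aws:iam::111122223333:role/replication-role"),
			Rules: []types.ReplicationRule{
				{
					Status:   types.ReplicationRuleStatusEnabled,
					Priority: aws.Int32(1),
					DeleteMarkerReplication: &types.DeleteMarkerReplication{
						Status: types.DeleteMarkerReplicationStatusDisabled,
					},
					Destination: &types.Destination{
						Bucket: aws.String("arn:aws:s3:::DESTINATION-BUCKET"),
					},
					// A Filter (key prefix and/or tags) is also required and is
					// intentionally omitted from this sketch; see the elements
					// listed in the documentation above.
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}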
+// +// The following operations are related to PutBucketReplication : +// +// [GetBucketReplication] +// +// [DeleteBucketReplication] +// +// [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html +// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html +// [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion +// [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html +// [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList +// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations +// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { if params == nil { params = &PutBucketReplicationInput{} @@ -88,17 +112,23 @@ type PutBucketReplicationInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The base64-encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -113,6 +143,7 @@ type PutBucketReplicationInput struct { } func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -146,25 +177,25 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -182,6 +213,15 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -191,7 +231,7 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go index d1dc5a765494..e0f9f31ef72c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go @@ -15,14 +15,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the request payment -// configuration for a bucket. By default, the bucket owner pays for downloads from -// the bucket. This configuration parameter enables the bucket owner (only) to -// specify that the person requesting the download will be charged for the -// download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) -// . The following operations are related to PutBucketRequestPayment : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// This operation is not supported by directory buckets. 
+// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download will +// be charged for the download. For more information, see [Requester Pays Buckets]. +// +// The following operations are related to PutBucketRequestPayment : +// +// [CreateBucket] +// +// [GetBucketRequestPayment] +// +// [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html +// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { if params == nil { params = &PutBucketRequestPaymentInput{} @@ -54,17 +62,23 @@ type PutBucketRequestPaymentInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The base64-encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -76,6 +90,7 @@ type PutBucketRequestPaymentInput struct { } func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -109,25 +124,25 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -145,6 +160,15 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -154,7 +178,7 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go index 725facc11380..2ed4fb6a1765 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go @@ -15,39 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the tags for a -// bucket. Use tags to organize your Amazon Web Services bill to reflect your own -// cost structure. To do this, sign up to get your Amazon Web Services account bill -// with tag key values included. Then, to see the cost of combined resources, -// organize your billing information according to resources with the same tag key -// values. For example, you can tag several resources with a specific application -// name, and then organize your billing information to see the total cost of that -// application across several services. 
For more information, see Cost Allocation -// and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) -// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) -// . When this operation sets the tags for a bucket, it will overwrite any current +// This operation is not supported by directory buckets. +// +// Sets the tags for a bucket. +// +// Use tags to organize your Amazon Web Services bill to reflect your own cost +// structure. To do this, sign up to get your Amazon Web Services account bill with +// tag key values included. Then, to see the cost of combined resources, organize +// your billing information according to resources with the same tag key values. +// For example, you can tag several resources with a specific application name, and +// then organize your billing information to see the total cost of that application +// across several services. For more information, see [Cost Allocation and Tagging]and [Using Cost Allocation in Amazon S3 Bucket Tags]. +// +// When this operation sets the tags for a bucket, it will overwrite any current // tags the bucket already has. You cannot use this operation to add tags to an -// existing list of tags. To use this operation, you must have permissions to -// perform the s3:PutBucketTagging action. The bucket owner has this permission by -// default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketTagging has the following special errors. For more Amazon S3 errors -// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) -// . +// existing list of tags. +// +// To use this operation, you must have permissions to perform the +// s3:PutBucketTagging action. The bucket owner has this permission by default and +// can grant this permission to others. For more information about permissions, see +// [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// PutBucketTagging has the following special errors. For more Amazon S3 errors +// see, [Error Responses]. +// // - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see Using Cost -// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) -// . +// the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags]. +// // - MalformedXML - The XML provided does not match the schema. +// // - OperationAborted - A conflicting conditional action is currently in progress // against this resource. Please try again. +// // - InternalError - The service was unable to apply the provided tag to the // bucket. 
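As a usage sketch under placeholder values (bucket name and tag keys/values are invented, error handling kept minimal), replacing a bucket's tag set with this version of the SDK could look like:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// PutBucketTagging overwrites the entire tag set, so include every tag the
	// bucket should keep, not just the ones being added.
	_, err = client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: aws.String("EXAMPLE-BUCKET"),
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("application"), Value: aws.String("billing-demo")},
				{Key: aws.String("team"), Value: aws.String("storage")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}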
// // The following operations are related to PutBucketTagging : -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// [GetBucketTagging] +// +// [DeleteBucketTagging] +// +// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html +// [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html +// [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { if params == nil { params = &PutBucketTaggingInput{} @@ -79,17 +94,23 @@ type PutBucketTaggingInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The base64-encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -101,6 +122,7 @@ type PutBucketTaggingInput struct { } func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -134,25 +156,25 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -170,6 +192,15 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -179,7 +210,7 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go index c2b751ab77a4..25b038ecf333 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go @@ -15,28 +15,47 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the versioning state -// of an existing bucket. You can set the versioning state with one of the -// following values: Enabled—Enables versioning for the objects in the bucket. All -// objects added to the bucket receive a unique version ID. Suspended—Disables -// versioning for the objects in the bucket. All objects added to the bucket -// receive the version ID null. If the versioning state has never been set on a -// bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// request does not return a versioning state value. In order to enable MFA Delete, -// you must be the bucket owner. 
If you are the bucket owner and want to enable MFA -// Delete in the bucket versioning configuration, you must include the x-amz-mfa -// request header and the Status and the MfaDelete request elements in a request -// to set the versioning state of the bucket. If you have an object expiration -// lifecycle configuration in your non-versioned bucket and you want to maintain -// the same permanent delete behavior when you enable versioning, you must add a -// noncurrent expiration policy. The noncurrent expiration lifecycle configuration -// will manage the deletes of the noncurrent object versions in the version-enabled -// bucket. (A version-enabled bucket maintains one current and zero or more -// noncurrent object versions.) For more information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config) -// . The following operations are related to PutBucketVersioning : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// This operation is not supported by directory buckets. +// +// Sets the versioning state of an existing bucket. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added to +// the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects added +// to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a [GetBucketVersioning]request does not return a versioning state value. +// +// In order to enable MFA Delete, you must be the bucket owner. If you are the +// bucket owner and want to enable MFA Delete in the bucket versioning +// configuration, you must include the x-amz-mfa request header and the Status and +// the MfaDelete request elements in a request to set the versioning state of the +// bucket. +// +// If you have an object expiration lifecycle configuration in your non-versioned +// bucket and you want to maintain the same permanent delete behavior when you +// enable versioning, you must add a noncurrent expiration policy. The noncurrent +// expiration lifecycle configuration will manage the deletes of the noncurrent +// object versions in the version-enabled bucket. (A version-enabled bucket +// maintains one current and zero or more noncurrent object versions.) For more +// information, see [Lifecycle and Versioning]. 
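A minimal sketch of enabling versioning on a general purpose bucket follows; the bucket name is a placeholder and the MFA header discussed above is not set here.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Enable versioning; use types.BucketVersioningStatusSuspended to suspend it.
	_, err = client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String("EXAMPLE-BUCKET"),
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}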
+// +// The following operations are related to PutBucketVersioning : +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [GetBucketVersioning] +// +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config +// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { if params == nil { params = &PutBucketVersioningInput{} @@ -68,17 +87,23 @@ type PutBucketVersioningInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // >The base64-encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -94,6 +119,7 @@ type PutBucketVersioningInput struct { } func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -127,25 +153,25 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -163,6 +189,15 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -172,7 +207,7 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go index 27555453080f..65a7f39e44a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go @@ -15,21 +15,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the configuration of -// the website that is specified in the website subresource. To configure a bucket -// as a website, you can add this subresource on the bucket with website -// configuration information such as the file name of the index document and any -// redirect rules. For more information, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . This PUT action requires the S3:PutBucketWebsite permission. By default, only +// This operation is not supported by directory buckets. +// +// Sets the configuration of the website that is specified in the website +// subresource. 
To configure a bucket as a website, you can add this subresource on +// the bucket with website configuration information such as the file name of the +// index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3]. +// +// This PUT action requires the S3:PutBucketWebsite permission. By default, only // the bucket owner can configure the website attached to a bucket; however, bucket // owners can allow other users to set the website configuration by writing a -// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect -// all website requests sent to the bucket's website endpoint, you add a website -// configuration with the following elements. Because all requests are sent to -// another website, you don't need to provide index document name for the bucket. +// bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you add +// a website configuration with the following elements. Because all requests are +// sent to another website, you don't need to provide index document name for the +// bucket. +// // - WebsiteConfiguration +// // - RedirectAllRequestsTo +// // - HostName +// // - Protocol // // If you want granular control over redirects, you can use the following elements @@ -37,27 +45,47 @@ import ( // information about the redirect destination. In this case, the website // configuration must provide an index document for the bucket, because some // requests might not be redirected. +// // - WebsiteConfiguration +// // - IndexDocument +// // - Suffix +// // - ErrorDocument +// // - Key +// // - RoutingRules +// // - RoutingRule +// // - Condition +// // - HttpErrorCodeReturnedEquals +// // - KeyPrefixEquals +// // - Redirect +// // - Protocol +// // - HostName +// // - ReplaceKeyPrefixWith +// // - ReplaceKeyWith +// // - HttpRedirectCode // // Amazon S3 has a limitation of 50 routing rules per website configuration. If // you require more than 50 routing rules, you can use object redirect. For more -// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB. +// information, see [Configuring an Object Redirect]in the Amazon S3 User Guide. +// +// The maximum request length is limited to 128 KB. +// +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html +// [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { if params == nil { params = &PutBucketWebsiteInput{} @@ -89,17 +117,23 @@ type PutBucketWebsiteInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . 
For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The base64-encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -111,6 +145,7 @@ type PutBucketWebsiteInput struct { } func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -144,25 +179,25 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -180,6 +215,15 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -189,7 +233,7 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go index 
1bade82e9a3e..270f19f1a4f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go @@ -18,51 +18,60 @@ import ( ) // Adds an object to a bucket. +// // - Amazon S3 never adds partial objects; if you receive a success response, // Amazon S3 added the entire object to the bucket. You cannot use PutObject to // only update a single piece of metadata for an existing object. You must put the // entire object with updated metadata if you want to update some values. +// // - If your bucket uses the bucket owner enforced setting for Object Ownership, // ACLs are disabled and no longer affect permissions. All objects written to the // bucket by any account will be owned by the bucket owner. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// Path-style requests are not supported. For more information, see [Regional and Zonal endpoints]in the +// Amazon S3 User Guide. // // Amazon S3 is a distributed system. If it receives multiple write requests for // the same object simultaneously, it overwrites all but the last object written. // However, Amazon S3 provides features that can modify this behavior: +// // - S3 Object Lock - To prevent objects from being deleted or overwritten, you -// can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) -// in the Amazon S3 User Guide. This functionality is not supported for directory -// buckets. +// can use [Amazon S3 Object Lock]in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. +// // - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 // receives multiple write requests for the same object simultaneously, it stores // all versions of the objects. For each write request that is made to the same // object, Amazon S3 automatically generates a unique version ID of that object // being stored in Amazon S3. You can retrieve, replace, or delete any version of -// the object. For more information about versioning, see Adding Objects to -// Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) -// in the Amazon S3 User Guide. For information about returning the versioning -// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// . This functionality is not supported for directory buckets. +// the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User +// Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning] +// . +// +// This functionality is not supported for directory buckets. // // Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your PutObject request includes specific headers. 
+// // - s3:PutObject - To successfully complete the PutObject request, you must // always have the s3:PutObject permission on a bucket to add an object to it. +// // - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject // request, you must have the s3:PutObjectAcl . +// // - s3:PutObjectTagging - To successfully set the tag-set with your PutObject // request, you must have the s3:PutObjectTagging . +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -70,24 +79,36 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Data integrity with Content-MD5 +// // - General purpose bucket - To ensure that data is not corrupted traversing // the network, use the Content-MD5 header. When you use this header, Amazon S3 // checks the object against the provided MD5 value and, if they do not match, // Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 // digest, you can calculate the MD5 while putting the object to Amazon S3 and // compare the returned ETag to the calculated MD5 value. +// // - Directory bucket - This functionality is not supported for directory // buckets. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . For more information about -// related Amazon S3 APIs, see the following: -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
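A minimal sketch of uploading a small object with an explicit checksum algorithm is shown below; the bucket, key, and body are placeholders, and setting ChecksumAlgorithm asks the SDK to compute and attach the corresponding checksum for the request.

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Upload a small object; CRC32 lets the service verify the payload integrity.
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:            aws.String("EXAMPLE-BUCKET"),
		Key:               aws.String("hello.txt"),
		Body:              strings.NewReader("hello world"),
		ContentType:       aws.String("text/plain"),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
	if err != nil {
		log.Fatal(err)
	}
}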
+// +// For more information about related Amazon S3 APIs, see the following: +// +// [CopyObject] +// +// [DeleteObject] +// +// [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { if params == nil { params = &PutObjectInput{} @@ -105,31 +126,39 @@ func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns . type PutObjectInput struct { - // The bucket name to which the PUT action was initiated. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format + // The bucket name to which the PUT action was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. 
The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -139,26 +168,33 @@ type PutObjectInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // in the Amazon S3 User Guide. When adding a new object, you can use headers to - // grant ACL-based permissions to individual Amazon Web Services accounts or to - // predefined groups defined by Amazon S3. These permissions are then added to the - // ACL on the object. By default, all objects are private. Only the owner has full - // access control. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) - // in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses - // the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and - // no longer affect permissions. Buckets that use this setting only accept PUT - // requests that don't specify an ACL or PUT requests that specify bucket owner - // full control ACLs, such as the bucket-owner-full-control canned ACL or an - // equivalent form of this ACL expressed in the XML format. PUT requests that - // contain other ACLs (for example, custom grants to certain Amazon Web Services - // accounts) fail and return a 400 error with the error code - // AccessControlListNotSupported . For more information, see Controlling - // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. + // The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon + // S3 User Guide. 
+ // + // When adding a new object, you can use headers to grant ACL-based permissions to + // individual Amazon Web Services accounts or to predefined groups defined by + // Amazon S3. These permissions are then added to the ACL on the object. By + // default, all objects are private. Only the owner has full access control. For + // more information, see [Access Control List (ACL) Overview]and [Managing ACLs Using the REST API] in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect + // permissions. Buckets that use this setting only accept PUT requests that don't + // specify an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this ACL + // expressed in the XML format. PUT requests that contain other ACLs (for example, + // custom grants to certain Amazon Web Services accounts) fail and return a 400 + // error with the error code AccessControlListNotSupported . For more information, + // see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 User Guide. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ACL types.ObjectCannedACL // Object data. @@ -167,102 +203,124 @@ type PutObjectInput struct { // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect - // bucket-level settings for S3 Bucket Key. This functionality is not supported for - // directory buckets. + // encryption with SSE-KMS. + // + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // Can be used to specify caching behavior along the request/reply chain. For more - // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) - // . + // information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]. + // + // [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 CacheControl *string // Indicates the algorithm used to create the checksum for the object when you use // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . 
For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . + // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . For directory buckets, when you - // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's - // used for performance. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided ChecksumAlgorithm + // parameter and uses the checksum algorithm that matches the provided value in + // x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 160-bit SHA-1 digest of the object. 
For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string - // Specifies presentational information for the object. For more information, see - // https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4) - // . + // Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4]. + // + // [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4 ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For more information, see - // https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding) - // . + // Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding ContentEncoding *string // The language the content is in. ContentLanguage *string // Size of the body in bytes. This parameter is useful when the size of the body - // cannot be determined automatically. For more information, see - // https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length) - // . + // cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length ContentLength *int64 // The base64-encoded 128-bit MD5 digest of the message (without the headers) // according to RFC 1864. This header can be used as a message integrity check to // verify that the data is the same data that was originally sent. Although it is // optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, see - // REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // . The Content-MD5 header is required for any request to upload an object with a + // integrity check. For more information about REST request authentication, see [REST Authentication]. + // + // The Content-MD5 header is required for any request to upload an object with a // retention period configured using Amazon S3 Object Lock. 
For more information - // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // about Amazon S3 Object Lock, see [Amazon S3 Object Lock Overview]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + // [Amazon S3 Object Lock Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html ContentMD5 *string // A standard MIME type describing the format of the contents. For more - // information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type) - // . + // information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type ContentType *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -271,27 +329,36 @@ type PutObjectInput struct { ExpectedBucketOwner *string // The date and time at which the object is no longer cacheable. For more - // information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3) - // . + // information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]. + // + // [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 Expires *time.Time // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string // Allows grantee to read the object data and its metadata. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string // Allows grantee to read the object ACL. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to write the ACL for the applicable object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string @@ -299,46 +366,55 @@ type PutObjectInput struct { Metadata map[string]string // Specifies whether a legal hold will be applied to this object. For more - // information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information about S3 Object Lock, see [Object Lock]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to this object. This functionality - // is not supported for directory buckets. + // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. 
ObjectLockMode types.ObjectLockMode // The date and time when you want this object's Object Lock to expire. Must be - // formatted as a timestamp parameter. This functionality is not supported for - // directory buckets. + // formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256 - // ). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256 ). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object @@ -346,8 +422,9 @@ type PutObjectInput struct { // JSON with the encryption context key-value pairs. This value is stored as object // metadata and automatically gets passed on to Amazon Web Services KMS for future // GetObject or CopyObject operations on this object. This value must be - // explicitly added during CopyObject operations. This functionality is not - // supported for directory buckets. + // explicitly added during CopyObject operations. + // + // This functionality is not supported for directory buckets. 
SSEKMSEncryptionContext *string // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , @@ -358,59 +435,79 @@ type PutObjectInput struct { // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web // Services managed key ( aws/s3 ) to protect the data. If the KMS key does not // exist in the same account that's issuing the command, you must use the full ARN - // and not just the ID. This functionality is not supported for directory buckets. + // and not just the ID. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm that was used when you store this object - // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). General purpose - // buckets - You have four mutually exclusive options to protect data using - // server-side encryption in Amazon S3, depending on how you choose to manage the - // encryption keys. Specifically, the encryption key options are Amazon S3 managed - // keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and - // customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side + // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + // + // General purpose buckets - You have four mutually exclusive options to protect + // data using server-side encryption in Amazon S3, depending on how you choose to + // manage the encryption keys. Specifically, the encryption key options are Amazon + // S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), + // and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side // encryption by using Amazon S3 managed keys (SSE-S3) by default. You can // optionally tell Amazon S3 to encrypt data at rest by using server-side - // encryption with other key options. For more information, see Using Server-Side - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) - // in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) value is - // supported. + // encryption with other key options. For more information, see [Using Server-Side Encryption]in the Amazon S3 + // User Guide. + // + // Directory buckets - For directory buckets, only the server-side encryption with + // Amazon S3 managed keys (SSE-S3) ( AES256 ) value is supported. + // + // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. + // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + // // - For directory buckets, only the S3 Express One Zone storage class is // supported to store newly created objects. + // // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. 
(For example, "Key1=Value1") This functionality is not supported for - // directory buckets. + // parameters. (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the // value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) - // in the Amazon S3 User Guide. In the following example, the request header sets - // the redirect to an object (anotherPage.html) in the same bucket: - // x-amz-website-redirect-location: /anotherPage.html In the following example, the - // request header sets the object redirect to another website: - // x-amz-website-redirect-location: http://www.example.com/ For more information - // about website hosting in Amazon S3, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // metadata, see [Object Key and Metadata]in the Amazon S3 User Guide. + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the + // Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html + // [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html + // [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -419,8 +516,9 @@ func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { type PutObjectOutput struct { // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be @@ -428,8 +526,10 @@ type PutObjectOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -437,8 +537,10 @@ type PutObjectOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -446,8 +548,10 @@ type PutObjectOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be @@ -455,71 +559,89 @@ type PutObjectOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string - // Entity tag for the uploaded object. General purpose buckets - To ensure that - // data is not corrupted traversing the network, for objects where the ETag is the - // MD5 digest of the object, you can calculate the MD5 while putting an object to - // Amazon S3 and compare the returned ETag to the calculated MD5 value. 
Directory - // buckets - The ETag for the object in a directory bucket isn't the MD5 digest of - // the object. + // Entity tag for the uploaded object. + // + // General purpose buckets - To ensure that data is not corrupted traversing the + // network, for objects where the ETag is the MD5 digest of the object, you can + // calculate the MD5 while putting an object to Amazon S3 and compare the returned + // ETag to the calculated MD5 value. + // + // Directory buckets - The ETag for the object in a directory bucket isn't the MD5 + // digest of the object. ETag *string - // If the expiration is configured for the object (see - // PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ) in the Amazon S3 User Guide, the response includes this header. It includes - // the expiry-date and rule-id key-value pairs that provide information about - // object expiration. The value of the rule-id is URL-encoded. This functionality - // is not supported for directory buckets. + // If the expiration is configured for the object (see [PutBucketLifecycleConfiguration]) in the Amazon S3 User + // Guide, the response includes this header. It includes the expiry-date and + // rule-id key-value pairs that provide information about object expiration. The + // value of the rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string // holding JSON with the encryption context key-value pairs. This value is stored // as object metadata and automatically gets passed on to Amazon Web Services KMS - // for future GetObject or CopyObject operations on this object. This - // functionality is not supported for directory buckets. + // for future GetObject or CopyObject operations on this object. + // + // This functionality is not supported for directory buckets. SSEKMSEncryptionContext *string // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , // this header indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. 
This functionality - // is not supported for directory buckets. + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption - // Version ID of the object. If you enable versioning for a bucket, Amazon S3 - // automatically generates a unique version ID for the object being stored. Amazon - // S3 returns this ID in the response. When you enable versioning for a bucket, if - // Amazon S3 receives multiple write requests for the same object simultaneously, - // it stores all of the objects. For more information about versioning, see Adding - // Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) - // in the Amazon S3 User Guide. For information about returning the versioning - // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) - // . This functionality is not supported for directory buckets. + // Version ID of the object. + // + // If you enable versioning for a bucket, Amazon S3 automatically generates a + // unique version ID for the object being stored. Amazon S3 returns this ID in the + // response. When you enable versioning for a bucket, if Amazon S3 receives + // multiple write requests for the same object simultaneously, it stores all of the + // objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User + // Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning]. + // + // This functionality is not supported for directory buckets. + // + // [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html + // [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html VersionId *string // Metadata pertaining to the operation's result. 
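As a rough, self-contained sketch of how the PutObjectInput and PutObjectOutput surfaces documented above are typically exercised, the program below uploads a small object and reads back the ETag and, when bucket versioning is enabled, the version ID. The bucket name, key, and body are placeholders, the SHA-256 checksum request is optional, and error handling is collapsed to log.Fatal; treat it as an illustration of the field types in this vendored version rather than a canonical SDK example.

package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the default chain (env vars, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Upload a small text object; bucket and key are placeholder values.
	out, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:      aws.String("amzn-s3-demo-bucket"),
		Key:         aws.String("notes/hello.txt"),
		Body:        strings.NewReader("hello world"),
		ContentType: aws.String("text/plain"),
		// Ask the SDK to compute and send a SHA-256 checksum for end-to-end integrity checking.
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	if err != nil {
		log.Fatal(err)
	}

	// ETag is always returned; VersionId is only populated for versioning-enabled buckets.
	fmt.Println("ETag:", aws.ToString(out.ETag))
	fmt.Println("VersionId:", aws.ToString(out.VersionId))
}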
@@ -550,25 +672,25 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -586,6 +708,15 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutObjectValidationMiddleware(stack); err != nil { return err } @@ -598,7 +729,7 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = add100Continue(stack, options); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go index 5716f550f6bf..b0b50898aa28 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go @@ -14,87 +14,152 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Uses the acl subresource -// to set the access control list (ACL) permissions for a new or existing object in -// an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an -// object. For more information, see What permissions can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3 -// on Outposts. Depending on your application needs, you can choose to set the ACL -// on an object using either the request body or the headers. For example, if you -// have an existing application that updates a bucket ACL using the request body, -// you can continue to use that approach. For more information, see Access Control -// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. 
You must use policies to grant access to your bucket and the -// objects in it. Requests to set ACLs or update ACLs fail and return the -// AccessControlListNotSupported error code. Requests to read ACLs are still -// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Permissions You can set access permissions using -// one of the following methods: +// This operation is not supported by directory buckets. +// +// Uses the acl subresource to set the access control list (ACL) permissions for a +// new or existing object in an S3 bucket. You must have the WRITE_ACP permission +// to set the ACL of an object. For more information, see [What permissions can I grant?]in the Amazon S3 User +// Guide. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// Depending on your application needs, you can choose to set the ACL on an object +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, you can +// continue to use that approach. For more information, see [Access Control List (ACL) Overview]in the Amazon S3 User +// Guide. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies to +// grant access to your bucket and the objects in it. Requests to set ACLs or +// update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the +// Amazon S3 User Guide. +// +// Permissions You can set access permissions using one of the following methods: +// // - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a // set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined // set of grantees and permissions. Specify the canned ACL name as the value of // x-amz-ac l. If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . +// control-specific headers in your request. For more information, see [Canned ACL]. +// // - Specify access permissions explicitly with the x-amz-grant-read , // x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the // permission. If you use these ACL-specific headers, you cannot use x-amz-acl // header to set a canned ACL. These parameters map to the set of permissions that -// Amazon S3 supports in an ACL. For more information, see Access Control List -// (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// . You specify each grantee as a type=value pair, where the type is one of the -// following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. 
Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants list objects permission to the two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-read: -// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. +// +// You specify each grantee as a type=value pair, where the type is one of the +// +// following: +// +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// +// - uri – if you are granting permissions to a predefined group +// +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. +// +// For example, the following x-amz-grant-read header grants list objects +// +// permission to the two Amazon Web Services accounts identified by their email +// addresses. +// +// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" // // You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// - By Email address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved -// to the CanonicalUser and, in a response to a GET Object acl request, appears as -// the CanonicalUser. Using email addresses to specify a grantee is only supported -// in the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. +// cannot do both. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request. 
+// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// - By Email address: +// +// <>Grantees@email.com<>lt;/Grantee> +// +// The grantee is resolved to the CanonicalUser and, in a response to a GET Object +// +// acl request, appears as the CanonicalUser. +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. // // Versioning The ACL of an object is set at the object version level. By default, // PUT sets the ACL of the current version of an object. To set the ACL of a -// different version, use the versionId subresource. The following operations are -// related to PutObjectAcl : -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// different version, use the versionId subresource. +// +// The following operations are related to PutObjectAcl : +// +// [CopyObject] +// +// [GetObject] +// +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { if params == nil { params = &PutObjectAclInput{} @@ -113,6 +178,7 @@ func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, op type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the ACL. + // // Access points - When you use this action with an access point, you must provide // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the @@ -120,15 +186,18 @@ type PutObjectAclInput struct { // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. 
+ // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -138,8 +207,9 @@ type PutObjectAclInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // . + // The canned ACL to apply to the object. For more information, see [Canned ACL]. + // + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL ACL types.ObjectCannedACL // Contains the elements that set the ACL permissions for an object per grantee. @@ -149,17 +219,23 @@ type PutObjectAclInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The base64-encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864.> (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.>] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.>]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -168,44 +244,54 @@ type PutObjectAclInput struct { ExpectedBucketOwner *string // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This functionality is not supported for Amazon S3 on Outposts. + // bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. 
GrantFullControl *string - // Allows grantee to list the objects in the bucket. This functionality is not - // supported for Amazon S3 on Outposts. + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the bucket ACL. This functionality is not supported for - // Amazon S3 on Outposts. + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This functionality - // is not supported for Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. This - // functionality is not supported for directory buckets. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -214,7 +300,9 @@ func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { type PutObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
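For the canned-ACL path described in the PutObjectAcl documentation above, a minimal sketch might look like the following. It assumes a *s3.Client constructed as in the earlier PutObject sketch and the same aws, s3, and types imports; the bucket and key are placeholders, and bucket-owner-full-control is just one of the predefined values exposed through types.ObjectCannedACL.

// putCannedACL applies a canned ACL to an existing object. A canned ACL and the
// explicit x-amz-grant-* headers (GrantRead, GrantWriteACP, ...) are mutually
// exclusive, so only ACL is set here.
func putCannedACL(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),      // placeholder bucket
		Key:    aws.String("reports/2024/summary.csv"), // placeholder key
		ACL:    types.ObjectCannedACLBucketOwnerFullControl,
	})
	return err
}

Note that on buckets using the bucket owner enforced Object Ownership setting, this call only succeeds because bucket-owner-full-control is one of the ACLs such buckets still accept; other canned ACLs would fail with AccessControlListNotSupported, as the documentation above describes.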
@@ -245,25 +333,25 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -281,6 +369,15 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { return err } @@ -290,7 +387,7 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go index 3fa38af3ebac..cbd9af75ef74 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -14,9 +14,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Applies a legal hold -// configuration to the specified object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. +// This operation is not supported by directory buckets. +// +// Applies a legal hold configuration to the specified object. For more +// information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { if params == nil { params = &PutObjectLegalHoldInput{} @@ -35,6 +40,7 @@ func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalH type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a legal hold on. 
+ // // Access points - When you use this action with an access point, you must provide // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the @@ -42,8 +48,9 @@ type PutObjectLegalHoldInput struct { // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -57,15 +64,19 @@ type PutObjectLegalHoldInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -81,10 +92,12 @@ type PutObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID of the object that you want to place a legal hold on. 
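As a rough illustration of the PutObjectLegalHold input documented above, a minimal sketch assuming a configured client; the bucket, key, and version ID are placeholders:

// Sketch only: places a legal hold on one object version. Names are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{
		Bucket:    aws.String("example-bucket"),  // placeholder
		Key:       aws.String("example-key"),     // placeholder
		VersionId: aws.String("example-version"), // placeholder; omit to target the current version
		LegalHold: &types.ObjectLockLegalHold{
			Status: types.ObjectLockLegalHoldStatusOn,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}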
@@ -94,6 +107,7 @@ type PutObjectLegalHoldInput struct { } func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -101,7 +115,9 @@ func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { type PutObjectLegalHoldOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -132,25 +148,25 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -168,6 +184,15 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -177,7 +202,7 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go index 49425c8f9f79..9a737f28534a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -14,17 +14,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Places an Object Lock -// configuration on the specified bucket. The rule specified in the Object Lock -// configuration will be applied by default to every new object placed in the -// specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . 
+// This operation is not supported by directory buckets. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see [Locking Objects]. +// // - The DefaultRetention settings require both a mode and a period. +// // - The DefaultRetention period can be either Days or Years but you must select // one. You cannot specify Days and Years at the same time. +// // - You can enable Object Lock for new or existing buckets. For more -// information, see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html) -// . +// information, see [Configuring Object Lock]. +// +// [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { if params == nil { params = &PutObjectLockConfigurationInput{} @@ -51,15 +56,19 @@ type PutObjectLockConfigurationInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -74,10 +83,12 @@ type PutObjectLockConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. 
+ // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // A token to allow Object Lock to be enabled for an existing bucket. @@ -87,6 +98,7 @@ type PutObjectLockConfigurationInput struct { } func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -94,7 +106,9 @@ func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParamet type PutObjectLockConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -125,25 +139,25 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -161,6 +175,15 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -170,7 +193,7 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go index 5dfb98f3d3a4..00709c0774c4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -14,12 +14,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. 
Places an Object -// Retention configuration on an object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . Users or accounts require the s3:PutObjectRetention permission in order to -// place an Object Retention configuration on objects. Bypassing a Governance +// This operation is not supported by directory buckets. +// +// Places an Object Retention configuration on an object. For more information, +// see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order +// to place an Object Retention configuration on objects. Bypassing a Governance // Retention configuration requires the s3:BypassGovernanceRetention permission. +// // This functionality is not supported for Amazon S3 on Outposts. +// +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { if params == nil { params = &PutObjectRetentionInput{} @@ -38,15 +42,18 @@ func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetent type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object - // Retention configuration to. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // Retention configuration to. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -64,15 +71,19 @@ type PutObjectRetentionInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. 
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -84,10 +95,12 @@ type PutObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The container element for the Object Retention configuration. @@ -101,6 +114,7 @@ type PutObjectRetentionInput struct { } func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -108,7 +122,9 @@ func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { type PutObjectRetentionOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
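A similar hedged sketch for PutObjectRetention, assuming Object Lock is already enabled on the bucket; the bucket, key, and retention date are placeholders:

// Sketch only: applies a GOVERNANCE-mode retention period to an object.
// Bucket, key, and retain-until date are placeholders.
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeGovernance,
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
		// Shortening or removing an existing GOVERNANCE retention also requires
		// the s3:BypassGovernanceRetention permission and this flag:
		// BypassGovernanceRetention: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}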
@@ -139,25 +155,25 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -175,6 +191,15 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -184,7 +209,7 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go index 8b42d43d1cb9..54485241dbfd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -14,35 +14,50 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the supplied tag-set -// to an object that already exists in a bucket. A tag is a key-value pair. For -// more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) -// . You can associate tags with an object by sending a PUT request against the +// This operation is not supported by directory buckets. +// +// Sets the supplied tag-set to an object that already exists in a bucket. A tag +// is a key-value pair. For more information, see [Object Tagging]. +// +// You can associate tags with an object by sending a PUT request against the // tagging subresource that is associated with the object. You can retrieve tags by -// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// . 
For tagging-related restrictions related to characters and encodings, see Tag -// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// . Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// sending a GET request. For more information, see [GetObjectTagging]. +// +// For tagging-related restrictions related to characters and encodings, see [Tag Restrictions]. +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// // To use this operation, you must have permission to perform the // s3:PutObjectTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. To put tags of any other version, use the -// versionId query parameter. You also need permission for the -// s3:PutObjectVersionTagging action. PutObjectTagging has the following special -// errors. For more Amazon S3 errors see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) -// . +// can grant this permission to others. +// +// To put tags of any other version, use the versionId query parameter. You also +// need permission for the s3:PutObjectVersionTagging action. +// +// PutObjectTagging has the following special errors. For more Amazon S3 errors +// see, [Error Responses]. +// // - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see Object -// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) -// . +// the tag did not pass input validation. For more information, see [Object Tagging]. +// // - MalformedXML - The XML provided does not match the schema. +// // - OperationAborted - A conflicting conditional action is currently in progress // against this resource. Please try again. +// // - InternalError - The service was unable to apply the provided tag to the // object. // // The following operations are related to PutObjectTagging : -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// [GetObjectTagging] +// +// [DeleteObjectTagging] +// +// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html +// [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { if params == nil { params = &PutObjectTaggingInput{} @@ -60,23 +75,27 @@ func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingI type PutObjectTaggingInput struct { - // The bucket name containing the object. Access points - When you use this action - // with an access point, you must provide the alias of the access point in place of - // the bucket name or specify the access point ARN. When using the access point - // ARN, you must direct requests to the access point hostname. 
The access point - // hostname takes the form + // The bucket name containing the object. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -95,15 +114,19 @@ type PutObjectTaggingInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. 
ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -115,10 +138,12 @@ type PutObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The versionId of the object that the tag-set will be added to. @@ -128,6 +153,7 @@ type PutObjectTaggingInput struct { } func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -165,25 +191,25 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -201,6 +227,15 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -210,7 +245,7 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go index ab0b5405e88d..97b8a8ae9e26 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go @@ -15,22 +15,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates or modifies the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketPublicAccessBlock permission. For more -// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or -// an object, it checks the PublicAccessBlock configuration for both the bucket -// (or the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported by directory buckets. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock configurations are different between the bucket and the // account, Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to PutPublicAccessBlock : -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see [The Meaning of "Public"]. 
+// +// The following operations are related to PutPublicAccessBlock : +// +// [GetPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetBucketPolicyStatus] +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { if params == nil { params = &PutPublicAccessBlockInput{} @@ -56,9 +72,10 @@ type PutPublicAccessBlockInput struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. For more - // information about when Amazon S3 considers a bucket or object public, see The - // Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon S3 User Guide. + // information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in + // the Amazon S3 User Guide. + // + // [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status // // This member is required. PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration @@ -67,15 +84,19 @@ type PutPublicAccessBlockInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash of the PutPublicAccessBlock request body. For requests made using - // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services - // SDKs, this field is calculated automatically. + // The MD5 hash of the PutPublicAccessBlock request body. 
+ // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -87,6 +108,7 @@ type PutPublicAccessBlockInput struct { } func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -120,25 +142,25 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -156,6 +178,15 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -165,7 +196,7 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go index a84b5326ce01..53f7ffd3d90d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go @@ -14,71 +14,51 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Restores an archived copy -// of an object back into Amazon S3 This functionality is not supported for Amazon -// S3 on Outposts. This action performs the following types of requests: -// - select - Perform a select query on an archived object +// This operation is not supported by directory buckets. +// +// # Restores an archived copy of an object back into Amazon S3 +// +// This functionality is not supported for Amazon S3 on Outposts. 
+// +// This action performs the following types of requests: +// // - restore an archive - Restore an archived object // // For more information about the S3 structure in the request body, see the // following: -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon S3 User Guide -// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide -// -// Define the SQL expression for the SELECT type of restoration for your query in -// the request body's SelectParameters structure. You can use expressions like the -// following examples. -// - The following expression returns all records from the specified object. -// SELECT * FROM Object -// - Assuming that you are not using any headers for data stored in the object, -// you can specify columns with positional headers. SELECT s._1, s._2 FROM -// Object s WHERE s._3 > 100 -// - If you have headers and you set the fileHeaderInfo in the CSV structure in -// the request body to USE , you can specify headers in the query. (If you set -// the fileHeaderInfo field to IGNORE , the first row is skipped for the query.) -// You cannot mix ordinal positions with header column names. SELECT s.Id, -// s.FirstName, s.SSN FROM S3Object s -// -// When making a select request, you can also do the following: -// - To expedite your queries, specify the Expedited tier. For more information -// about tiers, see "Restoring Archives," later in this topic. -// - Specify details about the data serialization format of both the input -// object that is being queried and the serialization of the CSV-encoded query -// results. -// -// The following are additional important facts about the select feature: -// - The output results are new Amazon S3 objects. Unlike archive retrievals, -// they are stored until explicitly deleted-manually or through a lifecycle -// configuration. -// - You can issue more than one select request on the same Amazon S3 object. -// Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests. -// - Amazon S3 accepts a select request even if the object has already been -// restored. A select request doesn’t return error response 409 . +// +// [PutObject] +// +// [Managing Access with ACLs] +// - in the Amazon S3 User Guide +// +// [Protecting Data Using Server-Side Encryption] +// - in the Amazon S3 User Guide // // Permissions To use this operation, you must have permissions to perform the // s3:RestoreObject action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. Restoring objects Objects that you archive to the -// S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive -// storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep -// Archive tiers, are not accessible in real time. 
For objects in the S3 Glacier -// Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage -// classes, you must first initiate a restore request, and then wait until a -// temporary copy of the object is available. If you want a permanent copy of the -// object, create a copy of it in the Amazon S3 Standard storage class in your S3 -// bucket. To access an archived object, you must restore the object for the -// duration (number of days) that you specify. For objects in the Archive Access or -// Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a -// restore request, and then wait until the object is moved into the Frequent -// Access tier. To restore a specific object version, you can provide a version ID. -// If you don't provide a version ID, Amazon S3 restores the current version. When -// restoring an archived object, you can specify one of the following data access -// tier options in the Tier element of the request body: +// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// +// Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval +// Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 +// Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are +// not accessible in real time. For objects in the S3 Glacier Flexible Retrieval +// Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first +// initiate a restore request, and then wait until a temporary copy of the object +// is available. If you want a permanent copy of the object, create a copy of it in +// the Amazon S3 Standard storage class in your S3 bucket. To access an archived +// object, you must restore the object for the duration (number of days) that you +// specify. For objects in the Archive Access or Deep Archive Access tiers of S3 +// Intelligent-Tiering, you must first initiate a restore request, and then wait +// until the object is moved into the Frequent Access tier. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// When restoring an archived object, you can specify one of the following data +// access tier options in the Tier element of the request body: +// // - Expedited - Expedited retrievals allow you to quickly access your data // stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or // S3 Intelligent-Tiering Archive tier when occasional urgent requests for @@ -88,6 +68,7 @@ import ( // Expedited retrievals is available when you need it. Expedited retrievals and // provisioned capacity are not available for objects stored in the S3 Glacier Deep // Archive storage class or S3 Intelligent-Tiering Deep Archive tier. +// // - Standard - Standard retrievals allow you to access any of your archived // objects within several hours. This is the default option for retrieval requests // that do not specify the retrieval option. Standard retrievals typically finish @@ -96,6 +77,7 @@ import ( // typically finish within 12 hours for objects stored in the S3 Glacier Deep // Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard // retrievals are free for objects stored in S3 Intelligent-Tiering. 
+// // - Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible // Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve // large amounts, even petabytes, of data at no cost. Bulk retrievals typically @@ -107,29 +89,33 @@ import ( // Deep Archive tier. // // For more information about archive retrieval options and provisioned capacity -// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to -// change the restore speed to a faster speed while it is in progress. For more -// information, see Upgrading the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon S3 User Guide. To get the status of object restoration, you can -// send a HEAD request. Operations return the x-amz-restore header, which provides -// information about the restoration status, in the response. You can use Amazon S3 -// event notifications to notify you when a restore is initiated or completed. For -// more information, see Configuring Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon S3 User Guide. After restoring an archived object, you can update -// the restoration period by reissuing the request with a new period. Amazon S3 -// updates the restoration period relative to the current time and charges only for -// the request-there are no data transfer charges. You cannot update the -// restoration period when Amazon S3 is actively processing your current restore -// request for the object. If your bucket has a lifecycle configuration with a rule -// that includes an expiration action, the object expiration overrides the life -// span that you specify in a restore request. For example, if you restore an -// object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon -// S3 deletes the object in 3 days. For more information about lifecycle -// configuration, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon S3 User Guide. Responses A successful action returns either the 200 OK -// or 202 Accepted status code. +// for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to a +// faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore]in the Amazon +// S3 User Guide. +// +// To get the status of object restoration, you can send a HEAD request. +// Operations return the x-amz-restore header, which provides information about +// the restoration status, in the response. You can use Amazon S3 event +// notifications to notify you when a restore is initiated or completed. For more +// information, see [Configuring Amazon S3 Event Notifications]in the Amazon S3 User Guide. +// +// After restoring an archived object, you can update the restoration period by +// reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request-there are +// no data transfer charges. 
You cannot update the restoration period when Amazon +// S3 is actively processing your current restore request for the object. +// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy for 10 +// days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the +// object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration]and [Object Lifecycle Management] +// in Amazon S3 User Guide. +// +// Responses A successful action returns either the 200 OK or 202 Accepted status +// code. // // - If the object is not previously restored, then Amazon S3 returns 202 // Accepted in the response. @@ -141,8 +127,7 @@ import ( // // - Code: RestoreAlreadyInProgress // -// - Cause: Object restore is already in progress. (This error does not apply to -// SELECT type requests.) +// - Cause: Object restore is already in progress. // // - HTTP Status Code: 409 Conflict // @@ -160,8 +145,22 @@ import ( // - SOAP Fault Code Prefix: N/A // // The following operations are related to RestoreObject : -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// [PutBucketLifecycleConfiguration] +// +// [GetBucketNotificationConfiguration] +// +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html +// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html +// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { if params == nil { params = &RestoreObjectInput{} @@ -179,23 +178,27 @@ func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, type RestoreObjectInput struct { - // The bucket name containing the object to restore. Access points - When you use - // this action with an access point, you must provide the alias of the access point - // in place of the bucket name or specify the access point ARN. 
When using the - // access point ARN, you must direct requests to the access point hostname. The - // access point hostname takes the form + // The bucket name containing the object to restore. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -209,10 +212,13 @@ type RestoreObjectInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide @@ -224,10 +230,12 @@ type RestoreObjectInput struct { // Bucket owners need not specify this parameter in their requests. 
If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Container for restore job parameters. @@ -240,6 +248,7 @@ type RestoreObjectInput struct { } func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -247,7 +256,9 @@ func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Indicates the path in the provided S3 output location where Select results will @@ -282,25 +293,25 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -318,6 +329,15 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { return err } @@ -327,7 +347,7 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil { diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go index 888ec9c250b6..c8663907e65b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go @@ -11,71 +11,97 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" smithysync "github.com/aws/smithy-go/sync" - smithyhttp "github.com/aws/smithy-go/transport/http" "sync" ) -// This operation is not supported by directory buckets. This action filters the -// contents of an Amazon S3 object based on a simple structured query language -// (SQL) statement. In the request, along with the SQL expression, you must also -// specify a data serialization format (JSON, CSV, or Apache Parquet) of the -// object. Amazon S3 uses this format to parse object data into records, and -// returns only records that match the specified SQL expression. You must also -// specify the data serialization format for the response. This functionality is -// not supported for Amazon S3 on Outposts. For more information about Amazon S3 -// Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) -// in the Amazon S3 User Guide. Permissions You must have the s3:GetObject -// permission for this operation. Amazon S3 Select does not support anonymous -// access. For more information about permissions, see Specifying Permissions in a -// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to -// query objects that have the following format properties: +// This operation is not supported by directory buckets. +// +// This action filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the SQL +// expression, you must also specify a data serialization format (JSON, CSV, or +// Apache Parquet) of the object. Amazon S3 uses this format to parse object data +// into records, and returns only records that match the specified SQL expression. +// You must also specify the data serialization format for the response. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User +// Guide. +// +// Permissions You must have the s3:GetObject permission for this operation. +// Amazon S3 Select does not support anonymous access. For more information about +// permissions, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. +// +// Object Data Formats You can use Amazon S3 Select to query objects that have the +// following format properties: +// // - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// // - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// // - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. // GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports // for CSV and JSON files. Amazon S3 Select supports columnar compression for // Parquet using GZIP or Snappy. 
Amazon S3 Select does not support whole-object // compression for Parquet objects. +// // - Server-side encryption - Amazon S3 Select supports querying objects that -// are protected with server-side encryption. For objects that are encrypted with -// customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use -// the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// . For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 -// managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side -// encryption is handled transparently, so you don't need to specify anything. For -// more information about server-side encryption, including SSE-S3 and SSE-KMS, see -// Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide. +// are protected with server-side encryption. +// +// For objects that are encrypted with customer-provided encryption keys (SSE-C), +// +// you must use HTTPS, and you must use the headers that are documented in the [GetObject]. +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)]in the Amazon S3 User Guide. +// +// For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon +// +// Web Services KMS keys (SSE-KMS), server-side encryption is handled +// transparently, so you don't need to specify anything. For more information about +// server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption]in the Amazon S3 +// User Guide. // // Working with the Response Body Given the response size is unknown, Amazon S3 // Select streams the response as a series of messages and includes a // Transfer-Encoding header with chunked as its value in the response. For more -// information, see Appendix: SelectObjectContent Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) -// . GetObject Support The SelectObjectContent action does not support the -// following GetObject functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// . +// information, see [Appendix: SelectObjectContent Response]. +// +// GetObject Support The SelectObjectContent action does not support the following +// GetObject functionality. For more information, see [GetObject]. +// // - Range : Although you can specify a scan range for an Amazon S3 Select -// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) -// in the request parameters), you cannot specify the range of bytes of an object -// to return. +// request (see [SelectObjectContentRequest - ScanRange]in the request parameters), you cannot specify the range of +// bytes of an object to return. 
+// // - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the // ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING // storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or // REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or // DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For -// more information about storage classes, see Using Amazon S3 storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) -// in the Amazon S3 User Guide. +// more information about storage classes, see [Using Amazon S3 storage classes]in the Amazon S3 User Guide. +// +// Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes] // -// Special Errors For a list of special errors for this operation, see List of -// SELECT Object Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) // The following operations are related to SelectObjectContent : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// [GetObject] +// +// [GetBucketLifecycleConfiguration] +// +// [PutBucketLifecycleConfiguration] +// +// [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html +// [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange +// [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html +// [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { if params == nil { params = &SelectObjectContentInput{} @@ -96,9 +122,9 @@ func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectCo // expression, you must specify a data serialization format (JSON or CSV) of the // object. 
Amazon S3 uses this to parse object data into records. It returns only // records that match the specified SQL expression. You must also specify the data -// serialization format for the response. For more information, see S3Select API -// Documentation (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html) -// . +// serialization format for the response. For more information, see [S3Select API Documentation]. +// +// [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html type SelectObjectContentInput struct { // The S3 bucket. @@ -141,30 +167,37 @@ type SelectObjectContentInput struct { // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string // Specifies the byte range of the object to get the records from. A record is // processed when its first byte is contained by the range. This parameter is // optional, but when specified, it must not be empty. See RFC 2616, Section - // 14.35.1 about how to specify the start and end of the range. ScanRange may be - // used in the following ways: + // 14.35.1 about how to specify the start and end of the range. + // + // ScanRange may be used in the following ways: + // // - 50100 - process only the records starting between the bytes 50 and 100 // (inclusive, counting from zero) + // // - 50 - process only the records starting after the byte 50 + // // - 50 - process only the records within the last 50 bytes of the file. 
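Editor's note: the ScanRange and serialization options above come together in a call like the following sketch, which streams matching records from a CSV object. The bucket, key, SQL expression, and byte range are assumptions; the event-stream handling follows the generated SelectObjectContentEventStream API, and a client is assumed to have been built as in the earlier sketch.

package main

import (
	"context"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// selectCSV runs a SQL expression over a CSV object and writes matching
// records to stdout. All identifiers here are illustrative.
func selectCSV(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String(bucket),
		Key:            aws.String(key),
		Expression:     aws.String("SELECT s.name FROM S3Object s WHERE s.age > '30'"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
			// CompressionType: types.CompressionTypeGzip, // if the CSV object is gzip-compressed
		},
		OutputSerialization: &types.OutputSerialization{CSV: &types.CSVOutput{}},
		// Optional: only consider records whose first byte falls in this range.
		ScanRange: &types.ScanRange{Start: aws.Int64(0), End: aws.Int64(1024 * 1024)},
	})
	if err != nil {
		return err
	}
	stream := out.GetStream()
	defer stream.Close()

	// The response arrives as a series of event-stream messages, per the
	// Transfer-Encoding: chunked behavior described above.
	for event := range stream.Events() {
		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			if _, err := os.Stdout.Write(records.Value.Payload); err != nil {
				return err
			}
		}
	}
	return stream.Err()
}

Stats, progress, and end events arrive on the same channel and can be handled with additional cases in the type switch if needed.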
ScanRange *types.ScanRange @@ -172,6 +205,7 @@ type SelectObjectContentInput struct { } func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -215,25 +249,25 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -245,6 +279,15 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { return err } @@ -254,7 +297,7 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go index 53507fbabdf5..231da759d5c8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go @@ -16,38 +16,48 @@ import ( "io" ) -// Uploads a part in a multipart upload. In this operation, you provide new data -// as a part of an object in your request. However, you have an option to specify -// your existing Amazon S3 object as a data source for the part you are uploading. -// To upload a part from an existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// ) before you can upload any part. In response to your initiate request, Amazon -// S3 returns an upload ID, a unique identifier that you must include in your -// upload part request. Part numbers can be any number from 1 to 10,000, inclusive. -// A part number uniquely identifies a part and also defines its position within -// the object being created. 
If you upload a new part using the same part number -// that was used with a previous part, the previously uploaded part is overwritten. +// Uploads a part in a multipart upload. +// +// In this operation, you provide new data as a part of an object in your request. +// However, you have an option to specify your existing Amazon S3 object as a data +// source for the part you are uploading. To upload a part from an existing object, +// you use the [UploadPartCopy]operation. +// +// You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. In +// response to your initiate request, Amazon S3 returns an upload ID, a unique +// identifier that you must include in your upload part request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object being +// created. If you upload a new part using the same part number that was used with +// a previous part, the previously uploaded part is overwritten. +// // For information about maximum and minimum part sizes and other multipart upload -// specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. After you initiate multipart upload and upload one -// or more parts, you must either complete or abort multipart upload in order to -// stop getting charged for storage of the uploaded parts. Only after you either -// complete or abort multipart upload, Amazon S3 frees up the parts storage and -// stops charging you for the parts storage. For more information on multipart -// uploads, go to Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide . Directory buckets - For directory buckets, you -// must make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format +// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. +// +// After you initiate multipart upload and upload one or more parts, you must +// either complete or abort multipart upload in order to stop getting charged for +// storage of the uploaded parts. Only after you either complete or abort multipart +// upload, Amazon S3 frees up the parts storage and stops charging you for the +// parts storage. +// +// For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide . +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Permissions +// // - General purpose bucket permissions - For information on the permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. 
+// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -55,19 +65,19 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // -// Data integrity General purpose bucket - To ensure that data is not corrupted +// Data integrity General purpose bucket - To ensure that data is not corrupted // traversing the network, specify the Content-MD5 header in the upload part // request. Amazon S3 checks the part data against the provided MD5 value. If they // do not match, Amazon S3 returns an error. If the upload request is signed with // Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 -// header as a checksum instead of Content-MD5 . For more information see -// Authenticating Requests: Using the Authorization Header (Amazon Web Services -// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) -// . Directory buckets - MD5 is not supported by directory buckets. You can use -// checksum algorithms to check object integrity. Encryption +// header as a checksum instead of Content-MD5 . For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]. +// +// Directory buckets - MD5 is not supported by directory buckets. You can use +// checksum algorithms to check object integrity. +// +// Encryption // - General purpose bucket - Server-side encryption is for data encryption at // rest. Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. You have mutually exclusive options to @@ -78,37 +88,70 @@ import ( // encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally // tell Amazon S3 to encrypt data at rest using server-side encryption with other // key options. The option you use depends on whether you want to use KMS keys -// (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is -// supported by the S3 Multipart Upload operations. Unless you are using a -// customer-provided encryption key (SSE-C), you don't need to specify the -// encryption parameters in each UploadPart request. Instead, you only need to -// specify the server-side encryption parameters in the initial Initiate Multipart -// request. 
For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . If you request server-side encryption using a customer-provided encryption key -// (SSE-C) in your initiate multipart upload request, you must provide identical -// encryption information in each part upload using the following request headers. -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 -// - Directory bucket - For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. -// -// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon S3 User Guide. Special errors +// (SSE-KMS) or provide your own encryption key (SSE-C). +// +// Server-side encryption is supported by the S3 Multipart Upload operations. +// +// Unless you are using a customer-provided encryption key (SSE-C), you don't need +// to specify the encryption parameters in each UploadPart request. Instead, you +// only need to specify the server-side encryption parameters in the initial +// Initiate Multipart request. For more information, see [CreateMultipartUpload]. +// +// If you request server-side encryption using a customer-provided encryption key +// +// (SSE-C) in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following request headers. +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// - Directory bucket - For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// +// For more information, see [Using Server-Side Encryption] in the Amazon S3 User Guide. +// +// Special errors +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found +// // - SOAP Fault Code Prefix: Client // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to UploadPart : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
+// +// The following operations are related to UploadPart : +// +// [CreateMultipartUpload] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html +// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { if params == nil { params = &UploadPartInput{} @@ -126,31 +169,39 @@ func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns type UploadPartInput struct { - // The name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. 
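Editor's note: for orientation, the initiate/upload/complete sequence described above looks roughly like the hypothetical helper below, with a CRC32 checksum carried end to end. Bucket, key, part data, and the choice of CRC32 are assumptions; a real uploader would loop over multiple 5 MiB+ parts rather than send a single part.

package main

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// uploadOnePart covers initiate -> UploadPart -> complete for a single part.
func uploadOnePart(ctx context.Context, client *s3.Client, bucket, key string, data []byte) error {
	create, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket:            aws.String(bucket),
		Key:               aws.String(key),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
	if err != nil {
		return err
	}

	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:            aws.String(bucket),
		Key:               aws.String(key),
		UploadId:          create.UploadId,
		PartNumber:        aws.Int32(1), // 1..10,000; reusing a number overwrites that part
		Body:              bytes.NewReader(data),
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32, // must match the CreateMultipartUpload choice
	})
	if err != nil {
		// Abort so the stored part stops accruing storage charges, as the doc above advises.
		_, _ = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
			Bucket: aws.String(bucket), Key: aws.String(key), UploadId: create.UploadId,
		})
		return err
	}

	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: create.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{
				ETag:          part.ETag,
				PartNumber:    aws.Int32(1),
				ChecksumCRC32: part.ChecksumCRC32,
			}},
		},
	})
	return err
}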
For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -178,40 +229,48 @@ type UploadPartInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must - // be the same for all parts and it match the checksum value supplied in the - // CreateMultipartUpload request. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. 
+ // + // This checksum algorithm must be the same for all parts and it match the + // checksum value supplied in the CreateMultipartUpload request. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Size of the body in bytes. This parameter is useful when the size of the body @@ -220,8 +279,9 @@ type UploadPartInput struct { // The base64-encoded 128-bit MD5 digest of the part data. This parameter is // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. This functionality is not supported for - // directory buckets. + // if object lock parameters are specified. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -233,14 +293,17 @@ type UploadPartInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -248,20 +311,23 @@ type UploadPartInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header . This must be the same - // encryption key specified in the initiate multipart upload request. This - // functionality is not supported for directory buckets. + // encryption key specified in the initiate multipart upload request. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -270,8 +336,9 @@ func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be @@ -279,8 +346,10 @@ type UploadPartOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. 
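Editor's note: a hedged sketch of how the three SSE-C fields above are usually populated from a single 256-bit customer key. Per the S3 REST contract the key and its MD5 digest travel base64-encoded; this assumes the SDK sends the field values as given rather than re-encoding them, and the helper name and key source are illustrative.

package main

import (
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// applySSEC fills the SSE-C fields on an UploadPartInput from a raw 32-byte key.
func applySSEC(in *s3.UploadPartInput, key []byte) {
	sum := md5.Sum(key) // lets S3 verify the key arrived intact
	in.SSECustomerAlgorithm = aws.String("AES256")
	in.SSECustomerKey = aws.String(base64.StdEncoding.EncodeToString(key))
	in.SSECustomerKeyMD5 = aws.String(base64.StdEncoding.EncodeToString(sum[:]))
}

As the field documentation notes, the key must be the same one supplied when the multipart upload was initiated, and these fields are not supported for directory buckets.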
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -288,8 +357,10 @@ type UploadPartOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -297,8 +368,10 @@ type UploadPartOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be @@ -306,37 +379,46 @@ type UploadPartOutput struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // Entity tag for the uploaded object. ETag *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. 
RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. @@ -367,25 +449,25 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -403,6 +485,15 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpUploadPartValidationMiddleware(stack); err != nil { return err } @@ -415,7 +506,7 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = add100Continue(stack, options); err != nil { return err } - if err = 
awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go index 1d48a7beaa12..0bd16a3a5af8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go @@ -18,90 +18,134 @@ import ( // Uploads a part by copying data from an existing object as data source. To // specify the data source, you add the request header x-amz-copy-source in your // request. To specify a byte range, you add the request header -// x-amz-copy-source-range in your request. For information about maximum and -// minimum part sizes and other multipart upload specifications, see Multipart -// upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. Instead of copying data from an existing object as -// part data, you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// action to upload new data as a part of an object in your request. You must -// initiate a multipart upload before you can upload any part. In response to your -// initiate request, Amazon S3 returns the upload ID, a unique identifier that you -// must include in your upload part request. For conceptual information about -// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. For information about copying objects using a -// single atomic action vs. a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format +// x-amz-copy-source-range in your request. +// +// For information about maximum and minimum part sizes and other multipart upload +// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. +// +// Instead of copying data from an existing object as part data, you might use the [UploadPart] +// action to upload new data as a part of an object in your request. +// +// You must initiate a multipart upload before you can upload any part. In +// response to your initiate request, Amazon S3 returns the upload ID, a unique +// identifier that you must include in your upload part request. +// +// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User +// Guide. For information about copying objects using a single atomic action vs. a +// multipart upload, see [Operations on Objects]in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format // https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. 
For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy -// requests must be authenticated and signed by using IAM credentials (access key -// ID and secret access key for the IAM identities). All headers with the x-amz- -// prefix, including x-amz-copy-source , must be signed. For more information, see -// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory buckets - You must use IAM credentials to authenticate and authorize +// requests are not supported. For more information, see [Regional and Zonal endpoints]in the Amazon S3 User +// Guide. +// +// Authentication and authorization All UploadPartCopy requests must be +// authenticated and signed by using IAM credentials (access key ID and secret +// access key for the IAM identities). All headers with the x-amz- prefix, +// including x-amz-copy-source , must be signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use IAM credentials to authenticate and authorize // your access to the UploadPartCopy API operation, instead of using the temporary -// security credentials through the CreateSession API operation. Amazon Web -// Services CLI or SDKs handles authentication and authorization on your behalf. +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// // Permissions You must have READ access to the source object and WRITE access to // the destination bucket. +// // - General purpose bucket permissions - You must have the permissions in a // policy based on the bucket types of your source bucket and destination bucket in // an UploadPartCopy operation. +// // - If the source object is in a general purpose bucket, you must have the // s3:GetObject permission to read the source object that is being copied. +// // - If the destination bucket is a general purpose bucket, you must have the -// s3:PubObject permission to write the object copy to the destination bucket. -// For information about permissions required to use the multipart upload API, see -// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - You must have permissions in a bucket policy -// or an IAM identity-based policy based on the source and destination bucket types -// in an UploadPartCopy operation. -// - If the source object that you want to copy is in a directory bucket, you -// must have the s3express:CreateSession permission in the Action element of a -// policy to read the object . By default, the session is in the ReadWrite mode. -// If you want to restrict the access, you can explicitly set the -// s3express:SessionMode condition key to ReadOnly on the copy source bucket. -// - If the copy destination is a directory bucket, you must have the -// s3express:CreateSession permission in the Action element of a policy to write -// the object to the destination. The s3express:SessionMode condition key cannot -// be set to ReadOnly on the copy destination. 
For example policies, see Example -// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// s3:PutObject permission to write the object copy to the destination bucket. +// +// For information about permissions required to use the multipart upload API, see [Multipart upload API and permissions] +// +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have permissions in a bucket policy +// or an IAM identity-based policy based on the source and destination bucket types +// in an UploadPartCopy operation. +// +// - If the source object that you want to copy is in a directory bucket, you +// must have the s3express:CreateSession permission in the Action element of a +// policy to read the object. By default, the session is in the ReadWrite mode. +// If you want to restrict the access, you can explicitly set the +// s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// +// - If the copy destination is a directory bucket, you must have the +// s3express:CreateSession permission in the Action element of a policy to write +// the object to the destination. The s3express:SessionMode condition key cannot +// be set to ReadOnly on the copy destination. +// +// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. // // Encryption -// - General purpose buckets - For information about using server-side -// encryption with customer-provided encryption keys with the UploadPartCopy -// operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// . -// - Directory buckets - For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// - General purpose buckets - +// +// For information about using server-side encryption with customer-provided +// +// encryption keys with the UploadPartCopy operation, see [CopyObject]and [UploadPart]. +// +// - Directory buckets - For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. // // Special errors +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found +// // - Error Code: InvalidRequest +// // - Description: The specified copy source is not supported as a byte-range // copy source. +// // - HTTP Status Code: 400 Bad Request // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are -// related to UploadPartCopy : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . +// +// The following operations are related to UploadPartCopy : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Regional and Zonal endpoints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { if params == nil { params = &UploadPartCopyInput{} @@ -119,43 +163,53 @@ func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput type UploadPartCopyInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not // supported. 
Directory bucket names must be unique in the chosen Availability // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form + // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket + // naming restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you must + // direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you // use this action with S3 on Outposts through the Amazon Web Services SDKs, you // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // information about S3 on Outposts ARNs, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string // Specifies the source object for the copy operation. 
You specify the value in // one of two formats, depending on whether you want to access the source object - // through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) - // : + // through an [access point]: + // // - For objects not accessed through an access point, specify the name of the // source bucket and key of the source object, separated by a slash (/). For // example, to copy the object reports/january.pdf from the bucket // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must // be URL-encoded. + // // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . For example, to copy the object @@ -163,28 +217,39 @@ type UploadPartCopyInput struct { // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf // . The value must be URL encoded. + // // - Amazon S3 supports copy operations using Access points only when the source // and destination buckets are in the same Amazon Web Services Region. - // - Access points are not supported by directory buckets. Alternatively, for - // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as - // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, - // to copy the object reports/january.pdf through outpost my-outpost owned by - // account 123456789012 in Region us-west-2 , use the URL encoding of + // + // - Access points are not supported by directory buckets. + // + // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + // ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 + // in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. + // // If your bucket has versioning enabled, you could have multiple versions of the // same object. By default, x-amz-copy-source identifies the current version of // the source object to copy. To copy a specific version of the source object to // copy, append ?versionId= to the x-amz-copy-source request header (for example, // x-amz-copy-source: // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). If the current version is a delete marker and you don't specify a versionId - // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found - // error, because the object does not exist. If you specify versionId in the + // ). + // + // If the current version is a delete marker and you don't specify a versionId in + // the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error, + // because the object does not exist. If you specify versionId in the // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an // HTTP 400 Bad Request error, because you are not allowed to specify a delete - // marker as a version for the x-amz-copy-source . Directory buckets - S3 - // Versioning isn't enabled and supported for directory buckets. + // marker as a version for the x-amz-copy-source . + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. 
+ // + // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html // // This member is required. CopySource *string @@ -205,34 +270,56 @@ type UploadPartCopyInput struct { // This member is required. UploadId *string - // Copies the object if its entity tag (ETag) matches the specified tag. If both - // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request as follows: x-amz-copy-source-if-match - // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since - // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. + // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both of the x-amz-copy-source-if-match and + // x-amz-copy-source-if-unmodified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-match condition evaluates to true , and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false ; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. If both of - // the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since - // headers are present in the request as follows: x-amz-copy-source-if-none-match - // condition evaluates to false , and; x-amz-copy-source-if-modified-since - // condition evaluates to true ; Amazon S3 returns 412 Precondition Failed - // response code. + // Copies the object if it has been modified since the specified time. + // + // If both of the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false , and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true ; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. If both of the x-amz-copy-source-if-none-match and + // Copies the object if its entity tag (ETag) is different than the specified ETag. + // + // If both of the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request as - // follows: x-amz-copy-source-if-none-match condition evaluates to false , and; - // x-amz-copy-source-if-modified-since condition evaluates to true ; Amazon S3 - // returns 412 Precondition Failed response code. + // follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false , and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true ; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. If both - // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request as follows: x-amz-copy-source-if-match - // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since - // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. + // Copies the object if it hasn't been modified since the specified time. 
+ // + // If both of the x-amz-copy-source-if-match and + // x-amz-copy-source-if-unmodified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-match condition evaluates to true , and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false ; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfUnmodifiedSince *time.Time // The range of bytes to copy from the source object. The range value must use the @@ -243,20 +330,26 @@ type UploadPartCopyInput struct { CopySourceRange *string // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). This functionality is not supported when the source object is in a - // directory bucket. + // AES256 ). + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. This functionality is not supported - // when the source object is in a directory bucket. + // was used when the source object was created. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the source object is in a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string // The account ID of the expected destination bucket owner. If the account ID that @@ -273,15 +366,18 @@ type UploadPartCopyInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported when the destination bucket is a - // directory bucket. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -289,21 +385,25 @@ type UploadPartCopyInput struct { // discarded; Amazon S3 does not store the encryption key. 
The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. This - // functionality is not supported when the destination bucket is a directory + // encryption key specified in the initiate multipart upload request. + // + // This functionality is not supported when the destination bucket is a directory // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the destination bucket is a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.DisableS3ExpressSessionAuth = ptr.Bool(true) } @@ -311,42 +411,52 @@ func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. BucketKeyEnabled *bool // Container for all response elements. CopyPartResult *types.CopyPartResult // The version of the source object that was copied, if you have enabled - // versioning on the source bucket. This functionality is not supported when the - // source object is in a directory bucket. + // versioning on the source bucket. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. 
SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). + // + // For directory buckets, only server-side encryption with Amazon S3 managed keys + // (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. @@ -377,25 +487,25 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -413,6 +523,15 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { return err } @@ -422,7 +541,7 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go index ac90ff7032ad..5f065ee79187 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go @@ -18,42 +18,54 @@ import ( "time" ) -// This operation is not supported by directory buckets. Passes transformed -// objects to a GetObject operation when using Object Lambda access points. For -// information about Object Lambda access points, see Transforming objects with -// Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) -// in the Amazon S3 User Guide. 
This operation supports metadata that can be -// returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// , in addition to RequestRoute , RequestToken , StatusCode , ErrorCode , and -// ErrorMessage . The GetObject response metadata is supported so that the -// WriteGetObjectResponse caller, typically an Lambda function, can provide the -// same metadata when it internally invokes GetObject . When WriteGetObjectResponse -// is called by a customer-owned Lambda function, the metadata returned to the end -// user GetObject call might differ from what Amazon S3 would normally return. You -// can include any number of metadata headers. When including a metadata header, it -// should be prefaced with x-amz-meta . For example, x-amz-meta-my-custom-header: -// MyCustomValue . The primary use case for this is to forward GetObject metadata. +// This operation is not supported by directory buckets. +// +// Passes transformed objects to a GetObject operation when using Object Lambda +// access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points]in the +// Amazon S3 User Guide. +// +// This operation supports metadata that can be returned by [GetObject], in addition to +// RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . The +// GetObject response metadata is supported so that the WriteGetObjectResponse +// caller, typically an Lambda function, can provide the same metadata when it +// internally invokes GetObject . When WriteGetObjectResponse is called by a +// customer-owned Lambda function, the metadata returned to the end user GetObject +// call might differ from what Amazon S3 would normally return. +// +// You can include any number of metadata headers. When including a metadata +// header, it should be prefaced with x-amz-meta . For example, +// x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to +// forward GetObject metadata. +// // Amazon Web Services provides some prebuilt Lambda functions that you can use // with S3 Object Lambda to detect and redact personally identifiable information // (PII) and decompress S3 objects. These Lambda functions are available in the // Amazon Web Services Serverless Application Repository, and can be selected // through the Amazon Web Services Management Console when you create your Object -// Lambda access point. Example 1: PII Access Control - This Lambda function uses -// Amazon Comprehend, a natural language processing (NLP) service using machine -// learning to find insights and relationships in text. It automatically detects -// personally identifiable information (PII) such as names, addresses, dates, -// credit card numbers, and social security numbers from documents in your Amazon -// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon -// Comprehend, a natural language processing (NLP) service using machine learning -// to find insights and relationships in text. It automatically redacts personally +// Lambda access point. +// +// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically detects personally // identifiable information (PII) such as names, addresses, dates, credit card // numbers, and social security numbers from documents in your Amazon S3 bucket. 
+// +// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically redacts personally +// identifiable information (PII) such as names, addresses, dates, credit card +// numbers, and social security numbers from documents in your Amazon S3 bucket. +// // Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is // equipped to decompress objects stored in S3 in one of six compressed file -// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For information -// on how to view and use these functions, see Using Amazon Web Services built -// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) -// in the Amazon S3 User Guide. +// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. +// +// For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3 +// User Guide. +// +// [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html +// [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) { if params == nil { params = &WriteGetObjectResponseInput{} @@ -88,7 +100,7 @@ type WriteGetObjectResponseInput struct { // The object data. Body io.Reader - // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for + // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for // server-side encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled *bool @@ -101,9 +113,12 @@ type WriteGetObjectResponseInput struct { // Lambda function. This may not match the checksum for the object stored in Amazon // S3. Amazon S3 will perform validation of the checksum values only when the // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // about checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data @@ -112,9 +127,12 @@ type WriteGetObjectResponseInput struct { // Lambda function. This may not match the checksum for the object stored in Amazon // S3. Amazon S3 will perform validation of the checksum values only when the // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. 
Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // about checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data @@ -123,9 +141,12 @@ type WriteGetObjectResponseInput struct { // function. This may not match the checksum for the object stored in Amazon S3. // Amazon S3 will perform validation of the checksum values only when the original // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data @@ -134,9 +155,12 @@ type WriteGetObjectResponseInput struct { // Lambda function. This may not match the checksum for the object stored in Amazon // S3. Amazon S3 will perform validation of the checksum values only when the // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // about checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Specifies presentational information for the object. @@ -205,8 +229,9 @@ type WriteGetObjectResponseInput struct { ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For - // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) - // . + // more information about S3 Object Lock, see [Object Lock]. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html ObjectLockMode types.ObjectLockMode // The date and time when Object Lock is configured to expire. @@ -216,12 +241,15 @@ type WriteGetObjectResponseInput struct { PartsCount *int32 // Indicates if request involves bucket that is either a source or destination in - // a Replication rule. For more information about S3 Replication, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html) - // . 
+ // a Replication rule. For more information about S3 Replication, see [Replication]. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides information about object restoration operation and expiration time of @@ -232,43 +260,59 @@ type WriteGetObjectResponseInput struct { // encryption key was specified for object stored in Amazon S3. SSECustomerAlgorithm *string - // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to - // encrypt data stored in S3. For more information, see Protecting data using - // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) - // . + // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to + // encrypt data stored in S3. For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]. + // + // [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string - // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web + // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web // Services Key Management Service (Amazon Web Services KMS) symmetric encryption // customer managed key that was used for stored in Amazon S3 object. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing requested object in + // The server-side encryption algorithm used when storing requested object in // Amazon S3 (for example, AES256, aws:kms ). ServerSideEncryption types.ServerSideEncryption // The integer status code for an HTTP response of a corresponding GetObject // request. The following is a list of status codes. + // // - 200 - OK + // // - 206 - Partial Content + // // - 304 - Not Modified + // // - 400 - Bad Request + // // - 401 - Unauthorized + // // - 403 - Forbidden + // // - 404 - Not Found + // // - 405 - Method Not Allowed + // // - 409 - Conflict + // // - 411 - Length Required + // // - 412 - Precondition Failed + // // - 416 - Range Not Satisfiable + // // - 500 - Internal Server Error + // // - 503 - Service Unavailable StatusCode *int32 // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . + // for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass // The number of tags, if any, on the object. 
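The reworked WriteGetObjectResponse documentation in the hunks above describes how an Object Lambda handler returns transformed data by echoing the RequestRoute and RequestToken it received, optionally with a StatusCode and the GetObject-style metadata headers. A minimal Go sketch of that flow is below; it is not part of this patch. The handler signature and the way the route, token, and presigned URL reach it are assumptions for illustration; only the WriteGetObjectResponseInput fields come from the generated client.

package objectlambda

import (
	"context"
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// handle fetches the original object through the presigned URL supplied by
// S3 Object Lambda and forwards it back to the waiting GetObject caller via
// WriteGetObjectResponse. A real handler would transform the body in between.
func handle(ctx context.Context, route, token, presignedURL string) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	client := s3.NewFromConfig(cfg)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, presignedURL, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// RequestRoute and RequestToken pair this response with the original
	// GetObject request; StatusCode and Body mirror what the end user sees.
	_, err = client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   aws.Int32(200),
		Body:         resp.Body,
	})
	return err
}
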
@@ -314,28 +358,28 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddUnsignedPayloadMiddleware(stack); err != nil { + if err = addUnsignedPayload(stack); err != nil { return err } - if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + if err = addContentSHA256Header(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -353,6 +397,15 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { return err } @@ -365,7 +418,7 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go index 6ef631bd3802..3a7bc64afe2b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go @@ -12,12 +12,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } -func bindAuthEndpointParams(params *AuthResolverParameters, input interface{}, options Options) { - params.endpointParams = bindEndpointParams(input, options) +func bindAuthEndpointParams(ctx context.Context, params *AuthResolverParameters, input interface{}, options Options) { + params.endpointParams = bindEndpointParams(ctx, input, options) } type setLegacyContextSigningOptionsMiddleware struct { @@ -98,13 +98,13 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - 
bindAuthEndpointParams(params, input, options) - bindAuthParamsRegion(params, input, options) + bindAuthEndpointParams(ctx, params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -179,7 +179,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go index 2be5df30ffae..d953cdc1cabd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go @@ -25,8 +25,17 @@ import ( "io/ioutil" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestxml_deserializeOpAbortMultipartUpload struct { } @@ -5504,12 +5513,17 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res } if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) + deserOverride, err := deserializeS3Expires(headerValues[0]) if err != nil { return err } - v.Expires = ptr.Time(t) + v.Expires = deserOverride + + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ExpiresString = ptr.String(headerValues[0]) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -7128,12 +7142,17 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r } if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) + deserOverride, err := deserializeS3Expires(headerValues[0]) if err != nil { return err } - v.Expires = ptr.Time(t) + v.Expires = deserOverride + + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ExpiresString = ptr.String(headerValues[0]) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go index 91af48fc0dee..bb5f4cf47af2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/aws/aws-sdk-go-v2/aws" smithyauth "github.com/aws/smithy-go/auth" ) @@ -18,6 +19,16 @@ func (r *endpointAuthResolver) ResolveAuthSchemes( ) ( []*smithyauth.Option, error, ) { + if params.endpointParams.Region == nil { + // #2502: We're correcting the 
endpoint binding behavior to treat empty + // Region as "unset" (nil), but auth resolution technically doesn't + // care and someone could be using V1 or non-default V2 endpoint + // resolution, both of which would bypass the required-region check. + // They shouldn't be broken because the region is technically required + // by this service's endpoint-based auth resolver, so we stub it here. + params.endpointParams.Region = aws.String("") + } + opts, err := r.resolveAuthSchemes(ctx, params) if err != nil { return nil, err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go index a1f2e36d63ae..4bc53cb91d6a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -228,6 +228,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + // EndpointParameters provides the parameters that influence how endpoints are // resolved. type EndpointParameters struct { @@ -319,6 +326,13 @@ type EndpointParameters struct { // is required. Prefix *string + // The Copy Source used for Copy Object request. This is an optional parameter that + // will be set automatically for operations that are scoped to Copy + // Source. + // + // Parameter is required. + CopySource *string + // Internal parameter to disable Access Point Buckets // // Parameter is required. @@ -412,6 +426,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. 
type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -1345,6 +1370,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -1388,6 +1426,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -1439,6 +1490,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -1484,6 +1548,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -3748,6 +3825,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -3793,6 +3883,19 @@ func (r *resolver) ResolveEndpoint( Properties: func() smithy.Properties { var out smithy.Properties smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + 
smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, { SchemeID: "aws.auth#sigv4", SignerProperties: func() smithy.Properties { @@ -5665,10 +5768,10 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} - params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.Region = bindRegion(options.Region) params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) params.Endpoint = options.BaseEndpoint @@ -5701,6 +5804,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -5710,7 +5817,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go index 18d6c06ada08..7c7a7b424008 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go @@ -24,14 +24,9 @@ func finalizeExpressCredentials(o *Options, c *Client) { } // Operation config finalizer: update the sigv4 credentials on the default -// express provider if it changed to ensure different cache keys +// express provider in case it changed to ensure different cache keys func finalizeOperationExpressCredentials(o *Options, c Client) { - p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider) - if !ok { - return - } - - if c.options.Credentials != o.Credentials { + if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { o.ExpressCredentials = p.CloneWithBaseCredentials(o.Credentials) } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go new file mode 100644 index 000000000000..a9b54535bdef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "context" + "strings" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" +) + +// isExpressUserAgent tracks whether the caller is using S3 Express +// +// we can only derive this at runtime, so the middleware needs to hold a handle +// to the underlying user-agent manipulator to set the feature flag as +// necessary +type isExpressUserAgent struct { + ua 
*awsmiddleware.RequestUserAgent +} + +func (*isExpressUserAgent) ID() string { + return "isExpressUserAgent" +} + +func (m *isExpressUserAgent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + const expressSuffix = "--x-s3" + + bucket, ok := bucketFromInput(in.Parameters) + if ok && strings.HasSuffix(bucket, expressSuffix) { + m.ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureS3ExpressBucket) + } + return next.HandleSerialize(ctx, in) +} + +func addIsExpressUserAgent(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Serialize.Add(&isExpressUserAgent{ua}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json index 4e666764fb45..6e392285ef50 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -9,8 +9,7 @@ "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4" + "github.com/aws/smithy-go": "v1.4.0" }, "files": [ "api_client.go", @@ -123,6 +122,7 @@ "options.go", "protocol_test.go", "serializers.go", + "snapshot_test.go", "types/enums.go", "types/errors.go", "types/types.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go index bff6ac9a7169..e365ab972f08 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -3,4 +3,4 @@ package s3 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.48.1" +const goModuleVersion = "1.58.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go index 064bcefb4fda..d22a058f191c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go @@ -27,6 +27,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -65,8 +68,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. 
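The new isExpressUserAgent middleware above keys off the "--x-s3" bucket suffix at serialize time to flag the S3 Express user-agent feature. The following is a hedged sketch of the same pattern from the caller side, using only the public smithy middleware hooks; addExpressBucketLogger and its logging behaviour are illustrative inventions, not part of this patch.

package main

import (
    "context"
    "log"
    "strings"

    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/smithy-go/middleware"
)

// addExpressBucketLogger registers a Serialize-step middleware, the same
// stack position the generated isExpressUserAgent middleware uses, that
// logs when a GetObject call targets an S3 Express (directory) bucket.
func addExpressBucketLogger(stack *middleware.Stack) error {
    return stack.Serialize.Add(middleware.SerializeMiddlewareFunc(
        "expressBucketLogger",
        func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
            middleware.SerializeOutput, middleware.Metadata, error,
        ) {
            if input, ok := in.Parameters.(*s3.GetObjectInput); ok && input.Bucket != nil &&
                strings.HasSuffix(*input.Bucket, "--x-s3") {
                log.Printf("request targets S3 Express bucket %s", *input.Bucket)
            }
            return next.HandleSerialize(ctx, in)
        },
    ), middleware.After)
}

func main() {
    // Wiring only; no request is sent here.
    client := s3.New(s3.Options{Region: "us-east-1"}, func(o *s3.Options) {
        o.APIOptions = append(o.APIOptions, addExpressBucketLogger)
    })
    _ = client
}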
This should be @@ -88,17 +93,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -141,8 +149,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -193,6 +202,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
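The options hunks above add AccountIDEndpointMode and reword the retry documentation. Below is a short usage sketch under the usual LoadDefaultConfig/NewFromConfig wiring, which this patch does not itself show; it assumes the aws.AccountIDEndpointModeDisabled constant from the new accountid_endpoint_mode.go and demonstrates the per-operation RetryMaxAttempts override described in the comment.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }

    // Client-level option: opt out of account-ID-based endpoint routing.
    client := s3.NewFromConfig(cfg, func(o *s3.Options) {
        o.AccountIDEndpointMode = aws.AccountIDEndpointModeDisabled
    })

    // Per-operation option: as documented above, a RetryMaxAttempts set on a
    // single call wraps the client's retryer for that operation only.
    _, err = client.ListBuckets(context.TODO(), &s3.ListBucketsInput{}, func(o *s3.Options) {
        o.RetryMaxAttempts = 5
    })
    if err != nil {
        log.Println("list buckets:", err)
    }
}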
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go index 59524bdcbd6b..96022eee0f38 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go @@ -118,7 +118,7 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CompleteMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -640,7 +640,7 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads&x-id=CreateMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -1803,7 +1803,7 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/?delete&x-id=DeleteObjects") + opPath, opQuery := httpbinding.SplitURI("/?delete") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -4208,6 +4208,30 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod encoder.SetHeader(locationName).String(string(v.RequestPayer)) } + if v.ResponseCacheControl != nil { + encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl) + } + + if v.ResponseContentDisposition != nil { + encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition) + } + + if v.ResponseContentEncoding != nil { + encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding) + } + + if v.ResponseContentLanguage != nil { + encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage) + } + + if v.ResponseContentType != nil { + encoder.SetQuery("response-content-type").String(*v.ResponseContentType) + } + + if v.ResponseExpires != nil { + encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires)) + } + if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) @@ -7758,7 +7782,7 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore&x-id=RestoreObject") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -7866,7 +7890,7 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2&x-id=SelectObjectContent") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -8341,7 +8365,7 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse") + opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go index ea3b9c82accd..bcb956b2618c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go @@ -11,6 +11,7 @@ const ( // Values returns all known values for AnalyticsS3ExportFileFormat. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat { return []AnalyticsS3ExportFileFormat{ @@ -27,8 +28,9 @@ const ( ) // Values returns all known values for ArchiveStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ArchiveStatus) Values() []ArchiveStatus { return []ArchiveStatus{ "ARCHIVE_ACCESS", @@ -45,8 +47,9 @@ const ( ) // Values returns all known values for BucketAccelerateStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketAccelerateStatus) Values() []BucketAccelerateStatus { return []BucketAccelerateStatus{ "Enabled", @@ -65,8 +68,9 @@ const ( ) // Values returns all known values for BucketCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketCannedACL) Values() []BucketCannedACL { return []BucketCannedACL{ "private", @@ -112,6 +116,7 @@ const ( // Values returns all known values for BucketLocationConstraint. 
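The HeadObject serializer hunk above starts emitting the response-* overrides as query parameters. A sketch of what that looks like from the caller side, assuming the ResponseCacheControl and ResponseContentType fields the serializer reads are exposed on s3.HeadObjectInput in this version; the bucket and key names are placeholders.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // The response-* overrides are serialized as query parameters, matching
    // the HeadObject serializer changes above.
    out, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
        Bucket:               aws.String("example-bucket"),
        Key:                  aws.String("example-key"),
        ResponseContentType:  aws.String("application/octet-stream"),
        ResponseCacheControl: aws.String("no-cache"),
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Println("content type:", aws.ToString(out.ContentType))
}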
Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (BucketLocationConstraint) Values() []BucketLocationConstraint { return []BucketLocationConstraint{ @@ -156,8 +161,9 @@ const ( ) // Values returns all known values for BucketLogsPermission. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketLogsPermission) Values() []BucketLogsPermission { return []BucketLogsPermission{ "FULL_CONTROL", @@ -174,8 +180,9 @@ const ( ) // Values returns all known values for BucketType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketType) Values() []BucketType { return []BucketType{ "Directory", @@ -191,8 +198,9 @@ const ( ) // Values returns all known values for BucketVersioningStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketVersioningStatus) Values() []BucketVersioningStatus { return []BucketVersioningStatus{ "Enabled", @@ -211,8 +219,9 @@ const ( ) // Values returns all known values for ChecksumAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ChecksumAlgorithm) Values() []ChecksumAlgorithm { return []ChecksumAlgorithm{ "CRC32", @@ -230,8 +239,9 @@ const ( ) // Values returns all known values for ChecksumMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ChecksumMode) Values() []ChecksumMode { return []ChecksumMode{ "ENABLED", @@ -248,8 +258,9 @@ const ( ) // Values returns all known values for CompressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (CompressionType) Values() []CompressionType { return []CompressionType{ "NONE", @@ -266,8 +277,9 @@ const ( ) // Values returns all known values for DataRedundancy. Note that this can be -// expanded in the future, and so it is only as up to date as the client. 
The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DataRedundancy) Values() []DataRedundancy { return []DataRedundancy{ "SingleAvailabilityZone", @@ -284,8 +296,9 @@ const ( // Values returns all known values for DeleteMarkerReplicationStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { return []DeleteMarkerReplicationStatus{ "Enabled", @@ -301,8 +314,9 @@ const ( ) // Values returns all known values for EncodingType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (EncodingType) Values() []EncodingType { return []EncodingType{ "url", @@ -343,8 +357,9 @@ const ( ) // Values returns all known values for Event. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Event) Values() []Event { return []Event{ "s3:ReducedRedundancyLostObject", @@ -387,8 +402,9 @@ const ( // Values returns all known values for ExistingObjectReplicationStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { return []ExistingObjectReplicationStatus{ "Enabled", @@ -405,8 +421,9 @@ const ( ) // Values returns all known values for ExpirationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExpirationStatus) Values() []ExpirationStatus { return []ExpirationStatus{ "Enabled", @@ -422,8 +439,9 @@ const ( ) // Values returns all known values for ExpressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExpressionType) Values() []ExpressionType { return []ExpressionType{ "SQL", @@ -440,8 +458,9 @@ const ( ) // Values returns all known values for FileHeaderInfo. Note that this can be -// expanded in the future, and so it is only as up to date as the client. 
The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FileHeaderInfo) Values() []FileHeaderInfo { return []FileHeaderInfo{ "USE", @@ -459,8 +478,9 @@ const ( ) // Values returns all known values for FilterRuleName. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FilterRuleName) Values() []FilterRuleName { return []FilterRuleName{ "prefix", @@ -478,8 +498,9 @@ const ( // Values returns all known values for IntelligentTieringAccessTier. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { return []IntelligentTieringAccessTier{ "ARCHIVE_ACCESS", @@ -497,6 +518,7 @@ const ( // Values returns all known values for IntelligentTieringStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { return []IntelligentTieringStatus{ @@ -515,8 +537,9 @@ const ( ) // Values returns all known values for InventoryFormat. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryFormat) Values() []InventoryFormat { return []InventoryFormat{ "CSV", @@ -534,8 +557,9 @@ const ( ) // Values returns all known values for InventoryFrequency. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryFrequency) Values() []InventoryFrequency { return []InventoryFrequency{ "Daily", @@ -553,8 +577,9 @@ const ( // Values returns all known values for InventoryIncludedObjectVersions. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { return []InventoryIncludedObjectVersions{ "All", @@ -584,8 +609,9 @@ const ( ) // Values returns all known values for InventoryOptionalField. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. 
+// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryOptionalField) Values() []InventoryOptionalField { return []InventoryOptionalField{ "Size", @@ -615,8 +641,9 @@ const ( ) // Values returns all known values for JSONType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (JSONType) Values() []JSONType { return []JSONType{ "DOCUMENT", @@ -632,8 +659,9 @@ const ( ) // Values returns all known values for LocationType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (LocationType) Values() []LocationType { return []LocationType{ "AvailabilityZone", @@ -649,8 +677,9 @@ const ( ) // Values returns all known values for MetadataDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetadataDirective) Values() []MetadataDirective { return []MetadataDirective{ "COPY", @@ -667,8 +696,9 @@ const ( ) // Values returns all known values for MetricsStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetricsStatus) Values() []MetricsStatus { return []MetricsStatus{ "Enabled", @@ -685,8 +715,9 @@ const ( ) // Values returns all known values for MFADelete. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MFADelete) Values() []MFADelete { return []MFADelete{ "Enabled", @@ -703,8 +734,9 @@ const ( ) // Values returns all known values for MFADeleteStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MFADeleteStatus) Values() []MFADeleteStatus { return []MFADeleteStatus{ "Enabled", @@ -724,8 +756,9 @@ const ( ) // Values returns all known values for ObjectAttributes. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. 
+// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectAttributes) Values() []ObjectAttributes { return []ObjectAttributes{ "ETag", @@ -750,8 +783,9 @@ const ( ) // Values returns all known values for ObjectCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectCannedACL) Values() []ObjectCannedACL { return []ObjectCannedACL{ "private", @@ -772,8 +806,9 @@ const ( ) // Values returns all known values for ObjectLockEnabled. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockEnabled) Values() []ObjectLockEnabled { return []ObjectLockEnabled{ "Enabled", @@ -790,6 +825,7 @@ const ( // Values returns all known values for ObjectLockLegalHoldStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { return []ObjectLockLegalHoldStatus{ @@ -807,8 +843,9 @@ const ( ) // Values returns all known values for ObjectLockMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockMode) Values() []ObjectLockMode { return []ObjectLockMode{ "GOVERNANCE", @@ -825,8 +862,9 @@ const ( ) // Values returns all known values for ObjectLockRetentionMode. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { return []ObjectLockRetentionMode{ "GOVERNANCE", @@ -844,8 +882,9 @@ const ( ) // Values returns all known values for ObjectOwnership. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectOwnership) Values() []ObjectOwnership { return []ObjectOwnership{ "BucketOwnerPreferred", @@ -872,8 +911,9 @@ const ( ) // Values returns all known values for ObjectStorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. 
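The enums.go hunks in this range only reflow the shared Values() doc comment. For context, here is a small sketch of how those methods are typically consumed, for example to validate a string against the values this client version knows about; isKnownStorageClass is an illustrative helper, not part of the SDK.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// isKnownStorageClass reports whether s matches a value this client knows.
// As the reflowed doc comments note, the list is only as up to date as the
// client and its ordering is not stable across updates.
func isKnownStorageClass(s string) bool {
    for _, v := range types.ObjectStorageClass("").Values() {
        if string(v) == s {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(isKnownStorageClass("STANDARD"))    // true
    fmt.Println(isKnownStorageClass("NOT_A_CLASS")) // false
}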
+// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectStorageClass) Values() []ObjectStorageClass { return []ObjectStorageClass{ "STANDARD", @@ -899,6 +939,7 @@ const ( // Values returns all known values for ObjectVersionStorageClass. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { return []ObjectVersionStorageClass{ @@ -915,6 +956,7 @@ const ( // Values returns all known values for OptionalObjectAttributes. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (OptionalObjectAttributes) Values() []OptionalObjectAttributes { return []OptionalObjectAttributes{ @@ -930,8 +972,9 @@ const ( ) // Values returns all known values for OwnerOverride. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (OwnerOverride) Values() []OwnerOverride { return []OwnerOverride{ "Destination", @@ -947,8 +990,9 @@ const ( ) // Values returns all known values for PartitionDateSource. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (PartitionDateSource) Values() []PartitionDateSource { return []PartitionDateSource{ "EventTime", @@ -965,8 +1009,9 @@ const ( ) // Values returns all known values for Payer. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Payer) Values() []Payer { return []Payer{ "Requester", @@ -986,8 +1031,9 @@ const ( ) // Values returns all known values for Permission. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Permission) Values() []Permission { return []Permission{ "FULL_CONTROL", @@ -1007,8 +1053,9 @@ const ( ) // Values returns all known values for Protocol. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Protocol) Values() []Protocol { return []Protocol{ "http", @@ -1025,8 +1072,9 @@ const ( ) // Values returns all known values for QuoteFields. 
Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (QuoteFields) Values() []QuoteFields { return []QuoteFields{ "ALWAYS", @@ -1044,6 +1092,7 @@ const ( // Values returns all known values for ReplicaModificationsStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { return []ReplicaModificationsStatus{ @@ -1061,8 +1110,9 @@ const ( ) // Values returns all known values for ReplicationRuleStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { return []ReplicationRuleStatus{ "Enabled", @@ -1082,8 +1132,9 @@ const ( ) // Values returns all known values for ReplicationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationStatus) Values() []ReplicationStatus { return []ReplicationStatus{ "COMPLETE", @@ -1103,8 +1154,9 @@ const ( ) // Values returns all known values for ReplicationTimeStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { return []ReplicationTimeStatus{ "Enabled", @@ -1120,8 +1172,9 @@ const ( ) // Values returns all known values for RequestCharged. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RequestCharged) Values() []RequestCharged { return []RequestCharged{ "requester", @@ -1136,8 +1189,9 @@ const ( ) // Values returns all known values for RequestPayer. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RequestPayer) Values() []RequestPayer { return []RequestPayer{ "requester", @@ -1152,8 +1206,9 @@ const ( ) // Values returns all known values for RestoreRequestType. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RestoreRequestType) Values() []RestoreRequestType { return []RestoreRequestType{ "SELECT", @@ -1170,8 +1225,9 @@ const ( ) // Values returns all known values for ServerSideEncryption. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ServerSideEncryption) Values() []ServerSideEncryption { return []ServerSideEncryption{ "AES256", @@ -1189,8 +1245,9 @@ const ( ) // Values returns all known values for SessionMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SessionMode) Values() []SessionMode { return []SessionMode{ "ReadOnly", @@ -1208,8 +1265,9 @@ const ( // Values returns all known values for SseKmsEncryptedObjectsStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { return []SseKmsEncryptedObjectsStatus{ "Enabled", @@ -1235,8 +1293,9 @@ const ( ) // Values returns all known values for StorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StorageClass) Values() []StorageClass { return []StorageClass{ "STANDARD", @@ -1262,8 +1321,9 @@ const ( // Values returns all known values for StorageClassAnalysisSchemaVersion. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { return []StorageClassAnalysisSchemaVersion{ "V_1", @@ -1279,8 +1339,9 @@ const ( ) // Values returns all known values for TaggingDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TaggingDirective) Values() []TaggingDirective { return []TaggingDirective{ "COPY", @@ -1298,8 +1359,9 @@ const ( ) // Values returns all known values for Tier. 
Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Tier) Values() []Tier { return []Tier{ "Standard", @@ -1321,8 +1383,9 @@ const ( ) // Values returns all known values for TransitionStorageClass. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TransitionStorageClass) Values() []TransitionStorageClass { return []TransitionStorageClass{ "GLACIER", @@ -1344,8 +1407,9 @@ const ( ) // Values returns all known values for Type. Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Type) Values() []Type { return []Type{ "CanonicalUser", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go index 166484f4ec2f..a01b922f73a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go @@ -64,14 +64,17 @@ func (e *BucketAlreadyOwnedByYou) ErrorCode() string { } func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Object is archived and inaccessible until restored. If the object you are -// retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 -// Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access -// tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can -// retrieve the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. +// Object is archived and inaccessible until restored. +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 +// Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep +// Archive Access tier, before you can retrieve the object you must first restore a +// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. 
+// +// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html type InvalidObjectState struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go index d3f7593fe761..e2775f5a1171 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -9,9 +9,9 @@ import ( // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For -// more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// in the Amazon S3 User Guide. +// more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. +// +// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config type AbortIncompleteMultipartUpload struct { // Specifies the number of days after which Amazon S3 aborts an incomplete @@ -22,8 +22,9 @@ type AbortIncompleteMultipartUpload struct { } // Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. +// information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide. +// +// [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html type AccelerateConfiguration struct { // Specifies the transfer acceleration status of the bucket. @@ -47,9 +48,10 @@ type AccessControlPolicy struct { // A container for information about access control for replicas. type AccessControlTranslation struct { - // Specifies the replica ownership. For default and valid values, see PUT bucket - // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // in the Amazon S3 API Reference. + // Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the + // Amazon S3 API Reference. + // + // [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html // // This member is required. Owner OwnerOverride @@ -82,7 +84,7 @@ type AnalyticsConfiguration struct { // This member is required. Id *string - // Contains data related to access patterns to be collected and made available to + // Contains data related to access patterns to be collected and made available to // analyze the tradeoffs between different storage classes. // // This member is required. @@ -162,9 +164,10 @@ type AnalyticsS3BucketDestination struct { Format AnalyticsS3ExportFileFormat // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. Although this value - // is optional, we strongly recommend that you set it to help prevent problems if - // the destination bucket ownership changes. + // provided, the owner is not validated before exporting data. 
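The errors.go hunk above reformats the InvalidObjectState documentation. A hedged sketch of how a caller detects that condition with errors.As and decides to restore the object first; the bucket and key names are placeholders.

package main

import (
    "context"
    "errors"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    _, err = client.GetObject(context.TODO(), &s3.GetObjectInput{
        Bucket: aws.String("example-bucket"),
        Key:    aws.String("archived-object"),
    })

    // As described above, archived objects return InvalidObjectState until a
    // copy has been restored with RestoreObject.
    var archived *types.InvalidObjectState
    if errors.As(err, &archived) {
        log.Printf("object is archived (storage class %s); restore it before reading",
            archived.StorageClass)
    } else if err != nil {
        log.Fatal(err)
    }
}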
+ // + // Although this value is optional, we strongly recommend that you set it to help + // prevent problems if the destination bucket ownership changes. BucketAccountId *string // The prefix to use when exporting data. The prefix is prepended to all results. @@ -187,9 +190,11 @@ type Bucket struct { } // Specifies the information about the bucket that will be created. For more -// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. This functionality is only supported by directory -// buckets. +// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +// +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html type BucketInfo struct { // The number of Availability Zone that's used for redundancy for the bucket. @@ -202,8 +207,9 @@ type BucketInfo struct { } // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For -// more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// more information, see [Object Lifecycle Management]in the Amazon S3 User Guide. +// +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html type BucketLifecycleConfiguration struct { // A lifecycle rule for individual objects in an Amazon S3 bucket. @@ -218,8 +224,10 @@ type BucketLifecycleConfiguration struct { type BucketLoggingStatus struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. + // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + // Reference. + // + // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html LoggingEnabled *LoggingEnabled noSmithyDocumentSerde @@ -233,8 +241,10 @@ type Checksum struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -242,8 +252,10 @@ type Checksum struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -251,8 +263,10 @@ type Checksum struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be @@ -260,8 +274,10 @@ type Checksum struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string noSmithyDocumentSerde @@ -283,8 +299,10 @@ type CommonPrefix struct { // The container for the completed multipart upload details. type CompletedMultipartUpload struct { - // Array of CompletedPart data types. If you do not supply a valid Part with your - // request, the service sends back an HTTP 400 response. + // Array of CompletedPart data types. + // + // If you do not supply a valid Part with your request, the service sends back an + // HTTP 400 response. Parts []CompletedPart noSmithyDocumentSerde @@ -298,8 +316,10 @@ type CompletedPart struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. 
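The CompletedMultipartUpload/CompletedPart doc hunks above stress that Parts must be supplied (and that part numbers must follow the rules listed just below). A minimal sketch of assembling the request; the bucket, key, upload ID, and ETag values are placeholders and do not come from this patch.

package main

import (
    "context"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Parts must be supplied, otherwise the service answers with HTTP 400 as
    // the doc comment above notes; ETags come from the UploadPart responses.
    _, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
        Bucket:   aws.String("example-bucket"),
        Key:      aws.String("example-key"),
        UploadId: aws.String("example-upload-id"),
        MultipartUpload: &types.CompletedMultipartUpload{
            Parts: []types.CompletedPart{
                {PartNumber: aws.Int32(1), ETag: aws.String("\"etag-1\"")},
                {PartNumber: aws.Int32(2), ETag: aws.String("\"etag-2\"")},
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}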
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -307,8 +327,10 @@ type CompletedPart struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -316,8 +338,10 @@ type CompletedPart struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be @@ -325,8 +349,10 @@ type CompletedPart struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // Entity tag returned when the part was uploaded. @@ -334,12 +360,14 @@ type CompletedPart struct { // Part number that identifies the part. This is a positive integer between 1 and // 10,000. + // // - General purpose buckets - In CompleteMultipartUpload , when a additional // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the // PartNumber must start at 1 and the part numbers must be consecutive. // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an // InvalidPartOrder error code. 
+ // // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start // at 1 and the part numbers must be consecutive. PartNumber *int32 @@ -366,10 +394,12 @@ type Condition struct { // be /docs , which identifies all objects in the docs/ folder. Required when the // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals // is not specified. If both conditions are specified, both must be true for the - // redirect to be applied. Replacement must be made for object keys containing - // special characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // redirect to be applied. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints KeyPrefixEquals *string noSmithyDocumentSerde @@ -383,27 +413,31 @@ type ContinuationEvent struct { type CopyObjectResult struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // present if it was uploaded with the object. For more information, see [Checking object integrity]in the + // Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // present if it was uploaded with the object. For more information, see [Checking object integrity]in the + // Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // present if it was uploaded with the object. For more information, see [Checking object integrity]in the + // Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // present if it was uploaded with the object. For more information, see [Checking object integrity]in the + // Amazon S3 User Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Returns the ETag of the new object. The ETag reflects only changes to the @@ -424,8 +458,10 @@ type CopyPartResult struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -433,8 +469,10 @@ type CopyPartResult struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -442,8 +480,10 @@ type CopyPartResult struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be @@ -451,8 +491,10 @@ type CopyPartResult struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // Entity tag of the object. @@ -465,8 +507,9 @@ type CopyPartResult struct { } // Describes the cross-origin access configuration for objects in an Amazon S3 -// bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. +// bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. +// +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html type CORSConfiguration struct { // A set of origins and methods (cross-origin access that you want to allow). You @@ -515,23 +558,30 @@ type CORSRule struct { // The configuration information for the bucket. type CreateBucketConfiguration struct { - // Specifies the information about the bucket that will be created. This - // functionality is only supported by directory buckets. + // Specifies the information about the bucket that will be created. + // + // This functionality is only supported by directory buckets. Bucket *BucketInfo - // Specifies the location where the bucket will be created. For directory buckets, - // the location type is Availability Zone. This functionality is only supported by - // directory buckets. + // Specifies the location where the bucket will be created. + // + // For directory buckets, the location type is Availability Zone. + // + // This functionality is only supported by directory buckets. Location *LocationInfo // Specifies the Region where the bucket will be created. You might choose a // Region to optimize latency, minimize costs, or address regulatory requirements. // For example, if you reside in Europe, you will probably find it advantageous to - // create buckets in the Europe (Ireland) Region. For more information, see - // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) - // in the Amazon S3 User Guide. If you don't specify a Region, the bucket is - // created in the US East (N. Virginia) Region (us-east-1) by default. This - // functionality is not supported for directory buckets. + // create buckets in the Europe (Ireland) Region. For more information, see [Accessing a bucket]in the + // Amazon S3 User Guide. + // + // If you don't specify a Region, the bucket is created in the US East (N. + // Virginia) Region (us-east-1) by default. + // + // This functionality is not supported for directory buckets. + // + // [Accessing a bucket]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro LocationConstraint BucketLocationConstraint noSmithyDocumentSerde @@ -548,7 +598,9 @@ type CSVInput struct { // A single character used to indicate that a row should be ignored when the // character is present at the start of that row. You can specify any character to - // indicate a comment line. The default character is # . Default: # + // indicate a comment line. The default character is # . + // + // Default: # Comments *string // A single character used to separate individual fields in a record. You can @@ -556,17 +608,26 @@ type CSVInput struct { FieldDelimiter *string // Describes the first line of input. Valid values are: + // // - NONE : First line is not a header. 
+ // // - IGNORE : First line is a header, but you can't use the header values to // indicate the column in an expression. You can use column position (such as _1, // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). + // // - Use : First line is a header, and you can use the header value to identify a // column in an expression ( SELECT "name" FROM OBJECT ). FileHeaderInfo FileHeaderInfo // A single character used for escaping when the field delimiter is part of the // value. For example, if the value is a, b , Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b " . Type: String Default: " Ancestors: CSV + // quotation marks, as follows: " a , b " . + // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string // A single character used for escaping the quotation mark character inside an @@ -599,7 +660,9 @@ type CSVOutput struct { QuoteEscapeCharacter *string // Indicates whether to use quotation marks around output fields. + // // - ALWAYS : Always use quotation marks for output fields. + // // - ASNEEDED : Use quotation marks for output fields when needed. QuoteFields QuoteFields @@ -612,7 +675,9 @@ type CSVOutput struct { // The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. +// // - The DefaultRetention settings require both a mode and a period. +// // - The DefaultRetention period can be either Days or Years but you must select // one. You cannot specify Days and Years at the same time. type DefaultRetention struct { @@ -635,10 +700,12 @@ type DefaultRetention struct { // Container for the objects to delete. type Delete struct { - // The object to delete. Directory buckets - For directory buckets, an object - // that's composed entirely of whitespace characters is not supported by the - // DeleteObjects API operation. The request will receive a 400 Bad Request error - // and none of the objects in the request will be deleted. + // The object to delete. + // + // Directory buckets - For directory buckets, an object that's composed entirely + // of whitespace characters is not supported by the DeleteObjects API operation. + // The request will receive a 400 Bad Request error and none of the objects in the + // request will be deleted. // // This member is required. Objects []ObjectIdentifier @@ -656,21 +723,24 @@ type DeletedObject struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. This functionality is not supported for directory - // buckets. + // object is a delete marker. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool // The version ID of the delete marker created as a result of the DELETE // operation. If you delete a specific object version, the value returned by this - // header is the version ID of the object version deleted. This functionality is - // not supported for directory buckets. + // header is the version ID of the object version deleted. + // + // This functionality is not supported for directory buckets. DeleteMarkerVersionId *string // The name of the deleted object. Key *string - // The version ID of the deleted object. This functionality is not supported for - // directory buckets. + // The version ID of the deleted object. 
+ // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -703,17 +773,20 @@ type DeleteMarkerEntry struct { // DeleteMarkerReplication element. If your Filter includes a Tag element, the // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example -// configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) -// . For more information about delete marker replication, see Basic Rule -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) -// . If you are using an earlier version of the replication configuration, Amazon -// S3 handles replication of delete markers differently. For more information, see -// Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) -// . +// configuration, see [Basic Rule Configuration]. +// +// For more information about delete marker replication, see [Basic Rule Configuration]. +// +// If you are using an earlier version of the replication configuration, Amazon S3 +// handles replication of delete markers differently. For more information, see [Backward Compatibility]. +// +// [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html +// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations type DeleteMarkerReplication struct { - // Indicates whether to replicate delete markers. Indicates whether to replicate - // delete markers. + // Indicates whether to replicate delete markers. + // + // Indicates whether to replicate delete markers. Status DeleteMarkerReplicationStatus noSmithyDocumentSerde @@ -723,7 +796,7 @@ type DeleteMarkerReplication struct { // for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). type Destination struct { - // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store // the results. // // This member is required. @@ -740,29 +813,32 @@ type Destination struct { // Amazon S3 to change replica ownership to the Amazon Web Services account that // owns the destination bucket by specifying the AccessControlTranslation // property, this is the account ID of the destination bucket owner. For more - // information, see Replication Additional Configuration: Changing the Replica - // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) - // in the Amazon S3 User Guide. + // information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide. + // + // [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html Account *string // A container that provides information about encryption. If // SourceSelectionCriteria is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration - // A container specifying replication metrics-related settings enabling + // A container specifying replication metrics-related settings enabling // replication metrics and events. 
Metrics *Metrics - // A container specifying S3 Replication Time Control (S3 RTC), including whether + // A container specifying S3 Replication Time Control (S3 RTC), including whether // S3 RTC is enabled and the time when all objects and operations on objects must // be replicated. Must be specified together with a Metrics block. ReplicationTime *ReplicationTime - // The storage class to use when replicating objects, such as S3 Standard or + // The storage class to use when replicating objects, such as S3 Standard or // reduced redundancy. By default, Amazon S3 uses the storage class of the source - // object to create the object replica. For valid values, see the StorageClass - // element of the PUT Bucket replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // action in the Amazon S3 API Reference. + // object to create the object replica. + // + // For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3 + // API Reference. + // + // [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html StorageClass StorageClass noSmithyDocumentSerde @@ -784,8 +860,9 @@ type Encryption struct { // If the encryption type is aws:kms , this optional value specifies the ID of the // symmetric encryption customer managed key to use for encryption of job results. // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // [Asymmetric keys in KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html KMSKeyId *string noSmithyDocumentSerde @@ -799,8 +876,9 @@ type EncryptionConfiguration struct { // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for // the destination bucket. Amazon S3 uses this key to encrypt replica objects. // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html ReplicaKmsKeyID *string noSmithyDocumentSerde @@ -819,414 +897,766 @@ type Error struct { // The error code is a string that uniquely identifies an error condition. It is // meant to be read and understood by programs that detect and handle errors by // type. The following is a list of Amazon S3 error codes. For more information, - // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) - // . + // see [Error responses]. + // // - Code: AccessDenied + // // - Description: Access Denied + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AccountProblem + // // - Description: There is a problem with your Amazon Web Services account that // prevents the action from completing successfully. Contact Amazon Web Services // Support for further assistance. 
+ // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AllAccessDisabled + // // - Description: All access to this Amazon S3 resource has been disabled. // Contact Amazon Web Services Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AmbiguousGrantByEmailAddress + // // - Description: The email address you provided is associated with more than // one account. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: AuthorizationHeaderMalformed + // // - Description: The authorization header you provided is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - HTTP Status Code: N/A + // // - Code: BadDigest + // // - Description: The Content-MD5 you specified did not match what we received. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketAlreadyExists + // // - Description: The requested bucket name is not available. The bucket // namespace is shared by all users of the system. Please select a different name // and try again. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketAlreadyOwnedByYou + // // - Description: The bucket you tried to create already exists, and you own it. // Amazon S3 returns this error in all Amazon Web Services Regions except in the // North Virginia Region. For legacy compatibility, if you re-create an existing // bucket that you already own in the North Virginia Region, Amazon S3 returns 200 // OK and resets the bucket access control lists (ACLs). + // // - Code: 409 Conflict (in all Regions except the North Virginia Region) + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketNotEmpty + // // - Description: The bucket you tried to delete is not empty. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: CredentialsNotSupported + // // - Description: This request does not support credentials. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: CrossLocationLoggingProhibited + // // - Description: Cross-location logging not allowed. Buckets in one geographic // location cannot log information to a bucket in another location. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: EntityTooSmall + // // - Description: Your proposed upload is smaller than the minimum allowed // object size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: EntityTooLarge + // // - Description: Your proposed upload exceeds the maximum allowed object size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: ExpiredToken + // // - Description: The provided token has expired. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IllegalVersioningConfigurationException + // // - Description: Indicates that the versioning configuration specified in the // request is invalid. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IncompleteBody + // // - Description: You did not provide the number of bytes specified by the // Content-Length HTTP header + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IncorrectNumberOfFilesInPostRequest + // // - Description: POST requires exactly one file upload per request. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InlineDataTooLarge + // // - Description: Inline data exceeds the maximum allowed size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InternalError + // // - Description: We encountered an internal error. Please try again. + // // - HTTP Status Code: 500 Internal Server Error + // // - SOAP Fault Code Prefix: Server + // // - Code: InvalidAccessKeyId + // // - Description: The Amazon Web Services access key ID you provided does not // exist in our records. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidAddressingHeader + // // - Description: You must specify the Anonymous role. + // // - HTTP Status Code: N/A + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidArgument + // // - Description: Invalid Argument + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidBucketName + // // - Description: The specified bucket is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidBucketState + // // - Description: The request is not valid with the current state of the bucket. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidDigest + // // - Description: The Content-MD5 you specified is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidEncryptionAlgorithmError + // // - Description: The encryption request you specified is not valid. The valid // value is AES256. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidLocationConstraint + // // - Description: The specified location constraint is not valid. For more - // information about Regions, see How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) - // . + // information about Regions, see [How to Select a Region for Your Buckets]. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidObjectState + // // - Description: The action is not valid for the current state of the object. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPart + // // - Description: One or more of the specified parts could not be found. The // part might not have been uploaded, or the specified entity tag might not have // matched the part's entity tag. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPartOrder + // // - Description: The list of parts was not in ascending order. Parts list must // be specified in order by part number. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPayer + // // - Description: All access to this object has been disabled. Please contact // Amazon Web Services Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPolicyDocument + // // - Description: The content of the form does not meet the conditions specified // in the policy document. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRange + // // - Description: The requested range cannot be satisfied. + // // - HTTP Status Code: 416 Requested Range Not Satisfiable + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRequest + // // - Description: Please use AWS4-HMAC-SHA256 . + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: SOAP requests must be made over an HTTPS connection. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported for buckets // with non-DNS compliant names. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported for buckets // with periods (.) in their names. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual // style requests. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest - // - Description: Amazon S3 Transfer Accelerate is not configured on this - // bucket. + // + // - Description: Amazon S3 Transfer Accelerate is not configured on this bucket. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported on this // bucket. Contact Amazon Web Services Support for more information. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this // bucket. Contact Amazon Web Services Support for more information. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidSecurity + // // - Description: The provided security credentials are not valid. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidSOAPRequest + // // - Description: The SOAP request body is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidStorageClass + // // - Description: The storage class you specified is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidTargetBucketForLogging + // // - Description: The target bucket for logging does not exist, is not owned by // you, or does not have the appropriate grants for the log-delivery group. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidToken + // // - Description: The provided token is malformed or otherwise invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidURI + // // - Description: Couldn't parse the specified URI. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: KeyTooLongError + // // - Description: Your key is too long. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedACLError + // // - Description: The XML you provided was not well-formed or did not validate // against our published schema. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedPOSTRequest + // // - Description: The body of your POST request is not well-formed // multipart/form-data. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedXML + // // - Description: This happens when the user sends malformed XML (XML that // doesn't conform to the published XSD) for the configuration. The error message // is, "The XML you provided was not well-formed or did not validate against our // published schema." + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MaxMessageLengthExceeded + // // - Description: Your request was too big. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MaxPostPreDataLengthExceededError + // // - Description: Your POST request fields preceding the upload file were too // large. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MetadataTooLarge - // - Description: Your metadata headers exceed the maximum allowed metadata - // size. + // + // - Description: Your metadata headers exceed the maximum allowed metadata size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MethodNotAllowed + // // - Description: The specified method is not allowed against this resource. + // // - HTTP Status Code: 405 Method Not Allowed + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingAttachment + // // - Description: A SOAP attachment was expected, but none were found. + // // - HTTP Status Code: N/A + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingContentLength + // // - Description: You must provide the Content-Length HTTP header. + // // - HTTP Status Code: 411 Length Required + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingRequestBodyError + // // - Description: This happens when the user sends an empty XML document as a // request. The error message is, "Request body is empty." + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingSecurityElement + // // - Description: The SOAP 1.1 request is missing a security element. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingSecurityHeader + // // - Description: Your request is missing a required header. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: NoLoggingStatusForKey + // // - Description: There is no such thing as a logging status subresource for a // key. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchBucket + // // - Description: The specified bucket does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchBucketPolicy + // // - Description: The specified bucket does not have a bucket policy. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchKey + // // - Description: The specified key does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchLifecycleConfiguration + // // - Description: The lifecycle configuration does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchUpload + // // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchVersion + // // - Description: Indicates that the version ID specified in the request does // not match an existing version. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NotImplemented + // // - Description: A header you provided implies functionality that is not // implemented. + // // - HTTP Status Code: 501 Not Implemented + // // - SOAP Fault Code Prefix: Server + // // - Code: NotSignedUp + // // - Description: Your account is not signed up for the Amazon S3 service. You // must sign up before you can use Amazon S3. You can sign up at the following URL: - // Amazon S3 (http://aws.amazon.com/s3) + // [Amazon S3] + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: OperationAborted + // // - Description: A conflicting conditional action is currently in progress // against this resource. Try again. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: PermanentRedirect + // // - Description: The bucket you are attempting to access must be addressed // using the specified endpoint. Send all future requests to this endpoint. + // // - HTTP Status Code: 301 Moved Permanently + // // - SOAP Fault Code Prefix: Client + // // - Code: PreconditionFailed + // // - Description: At least one of the preconditions you specified did not hold. + // // - HTTP Status Code: 412 Precondition Failed + // // - SOAP Fault Code Prefix: Client + // // - Code: Redirect + // // - Description: Temporary redirect. + // // - HTTP Status Code: 307 Moved Temporarily + // // - SOAP Fault Code Prefix: Client + // // - Code: RestoreAlreadyInProgress + // // - Description: Object restore is already in progress. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestIsNotMultiPartContent + // // - Description: Bucket POST must be of the enclosure-type multipart/form-data. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTimeout + // // - Description: Your socket connection to the server was not read from or // written to within the timeout period. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTimeTooSkewed + // // - Description: The difference between the request time and the server's time // is too large. 
+ // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTorrentOfBucketError + // // - Description: Requesting the torrent file of a bucket is not permitted. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: SignatureDoesNotMatch + // // - Description: The request signature we calculated does not match the // signature you provided. Check your Amazon Web Services secret access key and - // signing method. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) - // for details. + // signing method. For more information, see [REST Authentication]and [SOAP Authentication]for details. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: ServiceUnavailable + // // - Description: Service is unable to handle request. + // // - HTTP Status Code: 503 Service Unavailable + // // - SOAP Fault Code Prefix: Server + // // - Code: SlowDown + // // - Description: Reduce your request rate. + // // - HTTP Status Code: 503 Slow Down + // // - SOAP Fault Code Prefix: Server + // // - Code: TemporaryRedirect + // // - Description: You are being redirected to the bucket while DNS updates. + // // - HTTP Status Code: 307 Moved Temporarily + // // - SOAP Fault Code Prefix: Client + // // - Code: TokenRefreshRequired + // // - Description: The provided token must be refreshed. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: TooManyBuckets + // // - Description: You have attempted to create more buckets than allowed. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UnexpectedContent + // // - Description: This request does not support content. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UnresolvableGrantByEmailAddress + // // - Description: The email address you provided does not match any account on // record. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UserKeyMustBeSpecified + // // - Description: The bucket POST must contain the specified field name. If it // is specified, check the order of the fields. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // + // [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + // [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + // [Amazon S3]: http://aws.amazon.com/s3 + // [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html Code *string // The error key. @@ -1240,8 +1670,9 @@ type Error struct { // error message. Message *string - // The version ID of the error. This functionality is not supported for directory - // buckets. + // The version ID of the error. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -1250,11 +1681,12 @@ type Error struct { // The error information. type ErrorDocument struct { - // The object key name to use when a 4XX class error occurs. 
Replacement must be - // made for object keys containing special characters (such as carriage returns) - // when using XML requests. For more information, see XML related object key - // constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Key *string @@ -1268,8 +1700,9 @@ type EventBridgeConfiguration struct { } // Optional configuration to replicate existing source bucket objects. For more -// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) -// in the Amazon S3 User Guide. +// information, see [Replicating Existing Objects]in the Amazon S3 User Guide. +// +// [Replicating Existing Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication type ExistingObjectReplication struct { // Specifies whether Amazon S3 replicates existing source bucket objects. @@ -1280,15 +1713,23 @@ type ExistingObjectReplication struct { noSmithyDocumentSerde } -// Specifies the Amazon S3 object key name to filter on and whether to filter on -// the suffix or prefix of the key name. +// Specifies the Amazon S3 object key name to filter on. An object key name is the +// name assigned to an object in your Amazon S3 bucket. You specify whether to +// filter on the suffix or prefix of the object key name. A prefix is a specific +// string of characters at the beginning of an object key name, which you can use +// to organize objects. For example, you can start the key names of related objects +// with a prefix, such as 2023- or engineering/ . Then, you can use FilterRule to +// find objects in a bucket with key names that have the same prefix. A suffix is +// similar to a prefix, but it is at the end of the object key name instead of at +// the beginning. type FilterRule struct { // The object key name prefix or suffix identifying one or more objects to which // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see Configuring - // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications]in the + // Amazon S3 User Guide. + // + // [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html Name FilterRuleName // The value that the filter searches for in object key names. @@ -1318,10 +1759,12 @@ type GetObjectAttributesParts struct { // A container for elements related to a particular part. A response can contain // zero or more Parts elements. 
+ // // - General purpose buckets - For GetObjectAttributes , if a additional checksum // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1 // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the // request, the response doesn't return Part . + // // - Directory buckets - For GetObjectAttributes , no matter whether a additional // checksum is applied to the object specified in the request, the response returns // Part . @@ -1367,19 +1810,31 @@ type Grantee struct { // Screen name of the grantee. DisplayName *string - // Email address of the grantee. Using email addresses to specify a grantee is - // only supported in the following Amazon Web Services Regions: + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) + // // - South America (São Paulo) - // For a list of all the Amazon S3 supported Regions and endpoints, see Regions - // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the + // Amazon Web Services General Reference. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region EmailAddress *string // The canonical user ID of the grantee. @@ -1395,13 +1850,15 @@ type Grantee struct { type IndexDocument struct { // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request to - // samplebucket/images/ the data that is returned will be for the object with the - // key name images/index.html) The suffix must not be empty and must not include a - // slash character. Replacement must be made for object keys containing special - // characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // endpoint. (For example, if the suffix is index.html and you make a request to + // samplebucket/images/ , the data that is returned will be for the object with the + // key name images/index.html .) The suffix must not be empty and must not include + // a slash character. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Suffix *string @@ -1412,12 +1869,14 @@ type IndexDocument struct { // Container element that identifies who initiated the multipart upload. type Initiator struct { - // Name of the Principal. This functionality is not supported for directory - // buckets. + // Name of the Principal. + // + // This functionality is not supported for directory buckets. DisplayName *string // If the principal is an Amazon Web Services account, it provides the Canonical // User ID. 
If the principal is an IAM User, it provides a user ARN value. + // // Directory buckets - If the principal is an Amazon Web Services account, it // provides the Amazon Web Services account ID. If the principal is an IAM User, it // provides a user ARN value. @@ -1460,10 +1919,11 @@ type IntelligentTieringAndOperator struct { noSmithyDocumentSerde } -// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For -// information about the S3 Intelligent-Tiering storage class, see Storage class -// for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access type IntelligentTieringConfiguration struct { // The ID used to identify the S3 Intelligent-Tiering configuration. @@ -1498,10 +1958,12 @@ type IntelligentTieringFilter struct { And *IntelligentTieringAndOperator // An object key name prefix that identifies the subset of objects to which the - // rule applies. Replacement must be made for object keys containing special - // characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints Prefix *string // A container of a key value name pair. @@ -1511,8 +1973,9 @@ type IntelligentTieringFilter struct { } // Specifies the inventory configuration for an Amazon S3 bucket. For more -// information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) -// in the Amazon S3 API Reference. +// information, see [GET Bucket inventory]in the Amazon S3 API Reference. +// +// [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html type InventoryConfiguration struct { // Contains information about where to publish the inventory results. @@ -1607,9 +2070,10 @@ type InventoryS3BucketDestination struct { Format InventoryFormat // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. Although this value - // is optional, we strongly recommend that you set it to help prevent problems if - // the destination bucket ownership changes. + // provided, the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to help + // prevent problems if the destination bucket ownership changes. 
AccountId *string // Contains the type of server-side encryption used to encrypt the inventory @@ -1656,8 +2120,9 @@ type JSONOutput struct { type LambdaFunctionConfiguration struct { // The Amazon S3 bucket event for which to invoke the Lambda function. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // information, see [Supported Event Types]in the Amazon S3 User Guide. + // + // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html // // This member is required. Events []Event @@ -1669,8 +2134,9 @@ type LambdaFunctionConfiguration struct { LambdaFunctionArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -1680,9 +2146,11 @@ type LambdaFunctionConfiguration struct { noSmithyDocumentSerde } -// Container for the expiration for the lifecycle of the object. For more -// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// Container for the expiration for the lifecycle of the object. +// +// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. +// +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html type LifecycleExpiration struct { // Indicates at what date the object is to be moved or deleted. The date value @@ -1702,9 +2170,11 @@ type LifecycleExpiration struct { noSmithyDocumentSerde } -// A lifecycle rule for individual objects in an Amazon S3 bucket. For more -// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// A lifecycle rule for individual objects in an Amazon S3 bucket. +// +// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. +// +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html type LifecycleRule struct { // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is @@ -1715,9 +2185,9 @@ type LifecycleRule struct { // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For - // more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. + // more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. 
+ // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload // Specifies the expiration for the lifecycle of the object in the form of date, @@ -1739,7 +2209,7 @@ type LifecycleRule struct { // the object's lifetime. NoncurrentVersionExpiration *NoncurrentVersionExpiration - // Specifies the transition rule for the lifecycle rule that describes when + // Specifies the transition rule for the lifecycle rule that describes when // noncurrent objects transition to a specific storage class. If your bucket is // versioning-enabled (or versioning is suspended), you can set this action to // request that Amazon S3 transition noncurrent object versions to a specific @@ -1747,10 +2217,12 @@ type LifecycleRule struct { NoncurrentVersionTransitions []NoncurrentVersionTransition // Prefix identifying one or more objects to which the rule applies. This is no - // longer used; use Filter instead. Replacement must be made for object keys - // containing special characters (such as carriage returns) when using XML - // requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // longer used; use Filter instead. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // Deprecated: This member has been deprecated. Prefix *string @@ -1783,7 +2255,9 @@ type LifecycleRuleAndOperator struct { } // The Filter is used to identify objects that a Lifecycle Rule applies to. A -// Filter must have exactly one of Prefix , Tag , or And specified. +// Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan , +// ObjectSizeLessThan , or And specified. If the Filter element is left empty, the +// Lifecycle Rule applies to all objects in the bucket. // // The following types satisfy this interface: // @@ -1825,11 +2299,12 @@ type LifecycleRuleFilterMemberObjectSizeLessThan struct { func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {} -// Prefix identifying one or more objects to which the rule applies. Replacement -// must be made for object keys containing special characters (such as carriage -// returns) when using XML requests. For more information, see XML related object -// key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) -// . +// Prefix identifying one or more objects to which the rule applies. +// +// Replacement must be made for object keys containing special characters (such as +// carriage returns) when using XML requests. For more information, see [XML related object key constraints]. 
+// +// [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints type LifecycleRuleFilterMemberPrefix struct { Value string @@ -1847,16 +2322,21 @@ type LifecycleRuleFilterMemberTag struct { func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} -// Specifies the location where the bucket will be created. For directory buckets, -// the location type is Availability Zone. For more information about directory -// buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. This functionality is only supported by directory -// buckets. +// Specifies the location where the bucket will be created. +// +// For directory buckets, the location type is Availability Zone. For more +// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +// +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html type LocationInfo struct { - // The name of the location where the bucket will be created. For directory - // buckets, the AZ ID of the Availability Zone where the bucket will be created. An - // example AZ ID value is usw2-az2 . + // The name of the location where the bucket will be created. + // + // For directory buckets, the name of the location is the AZ ID of the + // Availability Zone where the bucket will be created. An example AZ ID value is + // usw2-az1 . Name *string // The type of location where the bucket will be created. @@ -1866,8 +2346,10 @@ type LocationInfo struct { } // Describes where logs are stored and the prefix that Amazon S3 assigns to all -// log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) -// in the Amazon S3 API Reference. +// log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API +// Reference. +// +// [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html type LoggingEnabled struct { // Specifies the bucket where you want Amazon S3 to store server access logs. You @@ -1887,10 +2369,12 @@ type LoggingEnabled struct { // This member is required. TargetPrefix *string - // Container for granting information. Buckets that use the bucket owner enforced - // setting for Object Ownership don't support target grants. For more information, - // see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) - // in the Amazon S3 User Guide. + // Container for granting information. + // + // Buckets that use the bucket owner enforced setting for Object Ownership don't + // support target grants. For more information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. + // + // [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general TargetGrants []TargetGrant // Amazon S3 key format for log objects. 
@@ -1911,16 +2395,17 @@ type MetadataEntry struct { noSmithyDocumentSerde } -// A container specifying replication metrics-related settings enabling +// A container specifying replication metrics-related settings enabling +// // replication metrics and events. type Metrics struct { - // Specifies whether the replication metrics are enabled. + // Specifies whether the replication metrics are enabled. // // This member is required. Status MetricsStatus - // A container specifying the time threshold for emitting the + // A container specifying the time threshold for emitting the // s3:Replication:OperationMissedThreshold event. EventThreshold *ReplicationTimeValue @@ -1948,8 +2433,9 @@ type MetricsAndOperator struct { // by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an // existing metrics configuration, note that this is a full replacement of the // existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// . +// keep, they are erased. For more information, see [PutBucketMetricsConfiguration]. +// +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html type MetricsConfiguration struct { // The ID used to identify the metrics configuration. The ID has a 64 character @@ -1969,8 +2455,7 @@ type MetricsConfiguration struct { // Specifies a metrics configuration filter. The metrics configuration only // includes objects that meet the filter's criteria. A filter must be a prefix, an // object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more -// information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// . +// information, see [PutBucketMetricsConfiguration]. // // The following types satisfy this interface: // @@ -1978,6 +2463,8 @@ type MetricsConfiguration struct { // MetricsFilterMemberAnd // MetricsFilterMemberPrefix // MetricsFilterMemberTag +// +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html type MetricsFilter interface { isMetricsFilter() } @@ -2036,13 +2523,15 @@ type MultipartUpload struct { Key *string // Specifies the owner of the object that is part of the multipart upload. - // Directory buckets - The bucket owner is returned as the object owner for all the - // objects. + // + // Directory buckets - The bucket owner is returned as the object owner for all + // the objects. Owner *Owner - // The class of storage used to store the object. Directory buckets - Only the S3 - // Express One Zone storage class is supported by directory buckets to store - // objects. + // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass StorageClass // Upload ID that identifies the multipart upload. @@ -2058,18 +2547,20 @@ type MultipartUpload struct { // the object's lifetime. type NoncurrentVersionExpiration struct { - // Specifies how many newer noncurrent versions must exist before Amazon S3 can - // perform the associated action on a given version. If there are this many more - // recent noncurrent versions, Amazon S3 will take the associated action. 
For more - // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. + // Specifies how many noncurrent versions Amazon S3 will retain. You can specify + // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any + // additional noncurrent versions beyond the specified number to retain. For more + // information about noncurrent versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + // + // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. The value must be a non-zero positive integer. - // For information about the noncurrent days calculations, see How Amazon S3 - // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. + // For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent]in the Amazon S3 + // User Guide. + // + // [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations NoncurrentDays *int32 noSmithyDocumentSerde @@ -2084,18 +2575,20 @@ type NoncurrentVersionExpiration struct { // specific period in the object's lifetime. type NoncurrentVersionTransition struct { - // Specifies how many newer noncurrent versions must exist before Amazon S3 can - // perform the associated action on a given version. If there are this many more - // recent noncurrent versions, Amazon S3 will take the associated action. For more - // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. + // Specifies how many noncurrent versions Amazon S3 will retain in the same + // storage class before transitioning objects. You can specify up to 100 noncurrent + // versions to retain. Amazon S3 will transition any additional noncurrent versions + // beyond the specified number to retain. For more information about noncurrent + // versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + // + // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates How Long an Object Has Been - // Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. + // calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]in the Amazon S3 User Guide. + // + // [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations NoncurrentDays *int32 // The class of storage used to store the object. 
@@ -2127,8 +2620,9 @@ type NotificationConfiguration struct { } // Specifies object key name filtering rules. For information about key name -// filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) -// in the Amazon S3 User Guide. +// filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. +// +// [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html type NotificationConfigurationFilter struct { // A container for object key name prefix and suffix filtering rules. @@ -2147,17 +2641,21 @@ type Object struct { // contents of an object, not its metadata. The ETag may or may not be an MD5 // digest of the object data. Whether or not it is depends on how the object was // created and how it is encrypted as described below: + // // - Objects created by the PUT Object, POST Object, or Copy operation, or // through the Amazon Web Services Management Console, and are encrypted by SSE-S3 // or plaintext, have ETags that are an MD5 digest of their object data. + // // - Objects created by the PUT Object, POST Object, or Copy operation, or // through the Amazon Web Services Management Console, and are encrypted by SSE-C // or SSE-KMS, have ETags that are not an MD5 digest of their object data. + // // - If an object is created by either the Multipart Upload or Part Copy // operation, the ETag is not an MD5 digest, regardless of the method of // encryption. If an object is larger than 16 MB, the Amazon Web Services // Management Console will upload or copy that object as a Multipart Upload, and // therefore the ETag will not be an MD5 digest. + // // Directory buckets - MD5 is not supported by directory buckets. ETag *string @@ -2168,25 +2666,29 @@ type Object struct { // Creation date of the object. LastModified *time.Time - // The owner of the object Directory buckets - The bucket owner is returned as the - // object owner. + // The owner of the object + // + // Directory buckets - The bucket owner is returned as the object owner. Owner *Owner // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see Working - // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Only the S3 Express One Zone storage class is supported by directory - // buckets to store objects. + // about these storage classes and how to work with archived objects, see [Working with archived objects]in the + // Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + // + // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html RestoreStatus *RestoreStatus // Size in bytes of the object Size *int64 - // The class of storage used to store the object. Directory buckets - Only the S3 - // Express One Zone storage class is supported by directory buckets to store - // objects. + // The class of storage used to store the object. 
+ // + // Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass ObjectStorageClass noSmithyDocumentSerde @@ -2195,16 +2697,19 @@ type Object struct { // Object Identifier is unique value to identify objects. type ObjectIdentifier struct { - // Key name of the object. Replacement must be made for object keys containing - // special characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // Key name of the object. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Key *string - // Version ID for the specific version of the object to delete. This functionality - // is not supported for directory buckets. + // Version ID for the specific version of the object to delete. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -2264,9 +2769,10 @@ type ObjectPart struct { // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -2274,8 +2780,10 @@ type ObjectPart struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -2283,8 +2791,10 @@ type ObjectPart struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be @@ -2292,8 +2802,10 @@ type ObjectPart struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // The part number identifying the part. This value is a positive integer between @@ -2330,9 +2842,10 @@ type ObjectVersion struct { // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see Working - // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. + // about these storage classes and how to work with archived objects, see [Working with archived objects]in the + // Amazon S3 User Guide. + // + // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html RestoreStatus *RestoreStatus // Size in bytes of the object. @@ -2373,14 +2886,23 @@ type Owner struct { // Container for the display name of the owner. This value is only supported in // the following Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) + // // - South America (São Paulo) + // // This functionality is not supported for directory buckets. DisplayName *string @@ -2405,23 +2927,30 @@ type OwnershipControls struct { type OwnershipControlsRule struct { // The container element for object ownership for a bucket's ownership controls. + // // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. ObjectWriter - The uploading account will own the object if the - // object is uploaded with the bucket-owner-full-control canned ACL. + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. 
The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't // specify an ACL or specify bucket owner full control ACLs (such as the predefined // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced - // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon - // use cases where you must control access for each object individually. For more - // information about S3 Object Ownership, see Controlling ownership of objects and - // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Directory buckets use the bucket owner enforced setting for S3 Object - // Ownership. + // the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + // you must control access for each object individually. For more information about + // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html // // This member is required. ObjectOwnership ObjectOwnership @@ -2439,9 +2968,10 @@ type Part struct { // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be @@ -2449,8 +2979,10 @@ type Part struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be @@ -2458,15 +2990,18 @@ type Part struct { // object that was uploaded using multipart uploads, this value may not be a direct // checksum value of the full object. 
Instead, it's a calculation based on the // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag returned when the part was uploaded. @@ -2486,7 +3021,9 @@ type Part struct { } // Amazon S3 keys for log objects are partitioned in the following format: -// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// // PartitionedPrefix defaults to EventTime delivery when server access logs are // delivered. type PartitionedPrefix struct { @@ -2534,41 +3071,48 @@ type ProgressEvent struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. For more -// information about when Amazon S3 considers a bucket or object public, see The -// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon S3 User Guide. +// information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in +// the Amazon S3 User Guide. +// +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status type PublicAccessBlockConfiguration struct { // Specifies whether Amazon S3 should block public access control lists (ACLs) for // this bucket and objects in this bucket. Setting this element to TRUE causes the // following behavior: - // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is - // public. + // + // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public. + // // - PUT Object calls fail if the request includes a public ACL. + // // - PUT Bucket calls fail if the request includes a public ACL. + // // Enabling this setting doesn't affect existing policies or ACLs. BlockPublicAcls *bool // Specifies whether Amazon S3 should block public bucket policies for this // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT - // Bucket policy if the specified bucket policy allows public access. 
Enabling this - // setting doesn't affect existing bucket policies. + // Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. BlockPublicPolicy *bool // Specifies whether Amazon S3 should ignore public ACLs for this bucket and // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore - // all public ACLs on this bucket and objects in this bucket. Enabling this setting - // doesn't affect the persistence of any existing ACLs and doesn't prevent new - // public ACLs from being set. + // all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs and + // doesn't prevent new public ACLs from being set. IgnorePublicAcls *bool // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only // Amazon Web Service principals and authorized users within this account if the - // bucket has a public policy. Enabling this setting doesn't affect previously - // stored bucket policies, except that public and cross-account access within any - // public bucket policy, including non-public delegation to specific accounts, is - // blocked. + // bucket has a public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. RestrictPublicBuckets *bool noSmithyDocumentSerde @@ -2590,8 +3134,9 @@ type QueueConfiguration struct { QueueArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -2630,18 +3175,22 @@ type Redirect struct { // documents/ , you can set a condition block with KeyPrefixEquals set to docs/ // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one // of the siblings is present. Can be present only if ReplaceKeyWith is not - // provided. Replacement must be made for object keys containing special characters - // (such as carriage returns) when using XML requests. For more information, see - // XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // provided. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints ReplaceKeyPrefixWith *string // The specific object key to use in the redirect request. For example, redirect // request to error.html . 
Not required if one of the siblings is present. Can be - // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made - // for object keys containing special characters (such as carriage returns) when - // using XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints ReplaceKeyWith *string noSmithyDocumentSerde @@ -2667,9 +3216,11 @@ type RedirectAllRequestsTo struct { // Amazon S3 doesn't replicate replica modifications by default. In the latest // version of replication configuration (when Filter is specified), you can // specify this element and set the status to Enabled to replicate modifications -// on replicas. If you don't specify the Filter element, Amazon S3 assumes that -// the replication configuration is the earlier version, V1. In the earlier -// version, this element is not allowed. +// on replicas. +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, this element +// is not allowed. type ReplicaModifications struct { // Specifies whether Amazon S3 replicates modifications on replicas. @@ -2685,9 +3236,10 @@ type ReplicaModifications struct { type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role - // that Amazon S3 assumes when replicating objects. For more information, see How - // to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) - // in the Amazon S3 User Guide. + // that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication]in + // the Amazon S3 User Guide. + // + // [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html // // This member is required. Role *string @@ -2720,18 +3272,21 @@ type ReplicationRule struct { // DeleteMarkerReplication element. If your Filter includes a Tag element, the // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example - // configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) - // . For more information about delete marker replication, see Basic Rule - // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) - // . If you are using an earlier version of the replication configuration, Amazon - // S3 handles replication of delete markers differently. For more information, see - // Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) - // . + // configuration, see [Basic Rule Configuration]. + // + // For more information about delete marker replication, see [Basic Rule Configuration]. 
+ // + // If you are using an earlier version of the replication configuration, Amazon S3 + // handles replication of delete markers differently. For more information, see [Backward Compatibility]. + // + // [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html + // [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations DeleteMarkerReplication *DeleteMarkerReplication // Optional configuration to replicate existing source bucket objects. For more - // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) - // in the Amazon S3 User Guide. + // information, see [Replicating Existing Objects]in the Amazon S3 User Guide. + // + // [Replicating Existing Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication ExistingObjectReplication *ExistingObjectReplication // A filter that identifies the subset of objects to which the replication rule @@ -2744,10 +3299,12 @@ type ReplicationRule struct { // An object key name prefix that identifies the object or objects to which the // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. Replacement must be made for - // object keys containing special characters (such as carriage returns) when using - // XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // objects in a bucket, specify an empty string. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // Deprecated: This member has been deprecated. Prefix *string @@ -2757,8 +3314,10 @@ type ReplicationRule struct { // according to all replication rules. However, if there are two or more rules with // the same destination bucket, then objects will be replicated according to the // rule with the highest priority. The higher the number, the higher the priority. - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) - // in the Amazon S3 User Guide. + // + // For more information, see [Replication] in the Amazon S3 User Guide. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html Priority *int32 // A container that describes additional filters for identifying the source @@ -2773,9 +3332,13 @@ type ReplicationRule struct { // A container for specifying rule filters. The filters determine the subset of // objects to which the rule applies. This element is required only if you specify -// more than one filter. For example: +// more than one filter. +// +// For example: +// // - If you specify both a Prefix and a Tag filter, wrap these filters in an And // tag. +// // - If you specify a filter based on multiple tags, wrap the Tag elements in an // And tag. 
type ReplicationRuleAndOperator struct { @@ -2806,8 +3369,10 @@ type ReplicationRuleFilter interface { // A container for specifying rule filters. The filters determine the subset of // objects to which the rule applies. This element is required only if you specify // more than one filter. For example: +// // - If you specify both a Prefix and a Tag filter, wrap these filters in an And // tag. +// // - If you specify a filter based on multiple tags, wrap the Tag elements in an // And tag. type ReplicationRuleFilterMemberAnd struct { @@ -2819,10 +3384,12 @@ type ReplicationRuleFilterMemberAnd struct { func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {} // An object key name prefix that identifies the subset of objects to which the -// rule applies. Replacement must be made for object keys containing special -// characters (such as carriage returns) when using XML requests. For more -// information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) -// . +// rule applies. +// +// Replacement must be made for object keys containing special characters (such as +// carriage returns) when using XML requests. For more information, see [XML related object key constraints]. +// +// [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints type ReplicationRuleFilterMemberPrefix struct { Value string @@ -2831,8 +3398,9 @@ type ReplicationRuleFilterMemberPrefix struct { func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {} -// A container for specifying a tag key and value. The rule applies only to -// objects that have the tag in their tag set. +// A container for specifying a tag key and value. +// +// The rule applies only to objects that have the tag in their tag set. type ReplicationRuleFilterMemberTag struct { Value Tag @@ -2841,19 +3409,20 @@ type ReplicationRuleFilterMemberTag struct { func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {} -// A container specifying S3 Replication Time Control (S3 RTC) related +// A container specifying S3 Replication Time Control (S3 RTC) related +// // information, including whether S3 RTC is enabled and the time when all objects // and operations on objects must be replicated. Must be specified together with a // Metrics block. type ReplicationTime struct { - // Specifies whether the replication time is enabled. + // Specifies whether the replication time is enabled. // // This member is required. Status ReplicationTimeStatus - // A container specifying the time by which replication should be complete for all - // objects and operations on objects. + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. // // This member is required. Time *ReplicationTimeValue @@ -2861,11 +3430,14 @@ type ReplicationTime struct { noSmithyDocumentSerde } -// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// // and replication metrics EventThreshold . type ReplicationTimeValue struct { - // Contains an integer specifying time in minutes. Valid value: 15 + // Contains an integer specifying time in minutes. 
+ // + // Valid value: 15 Minutes *int32 noSmithyDocumentSerde @@ -2896,8 +3468,10 @@ type RequestProgress struct { type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify - // OutputLocation . The Days element is required for regular restores, and must not - // be provided for select requests. + // OutputLocation . + // + // The Days element is required for regular restores, and must not be provided for + // select requests. Days *int32 // The optional description for the job. @@ -2924,34 +3498,43 @@ type RestoreRequest struct { // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information -// about these storage classes and how to work with archived objects, see Working -// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) -// in the Amazon S3 User Guide. This functionality is not supported for directory -// buckets. Only the S3 Express One Zone storage class is supported by directory -// buckets to store objects. +// about these storage classes and how to work with archived objects, see [Working with archived objects]in the +// Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Only the S3 Express +// One Zone storage class is supported by directory buckets to store objects. +// +// [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html type RestoreStatus struct { // Specifies whether the object is currently being restored. If the object // restoration is in progress, the header returns the value TRUE . For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="true" If the object - // restoration has completed, the header returns the value FALSE . For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" If the object hasn't been restored, - // there is no header response. + // + // x-amz-optional-object-attributes: IsRestoreInProgress="true" + // + // If the object restoration has completed, the header returns the value FALSE . + // For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // If the object hasn't been restored, there is no header response. IsRestoreInProgress *bool // Indicates when the restored copy will expire. This value is populated only if // the object has already been restored. For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" RestoreExpiryDate *time.Time noSmithyDocumentSerde } // Specifies the redirect behavior and when a redirect is applied. For more -// information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) -// in the Amazon S3 User Guide. +// information about routing rules, see [Configuring advanced conditional redirects]in the Amazon S3 User Guide. +// +// [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects type RoutingRule struct { // Container for redirect information. 
You can redirect requests to another host, @@ -3126,8 +3709,9 @@ type SelectParameters struct { // at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key // in your Amazon Web Services account the first time that you add an object // encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for -// SSE-KMS. For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) -// in the Amazon S3 API Reference. +// SSE-KMS. For more information, see [PUT Bucket encryption]in the Amazon S3 API Reference. +// +// [PUT Bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html type ServerSideEncryptionByDefault struct { // Server-side encryption algorithm to use for the default encryption. @@ -3137,19 +3721,30 @@ type ServerSideEncryptionByDefault struct { // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services // KMS key ID to use for the default encryption. This parameter is allowed if and - // only if SSEAlgorithm is set to aws:kms . You can specify the key ID, key alias, - // or the Amazon Resource Name (ARN) of the KMS key. + // only if SSEAlgorithm is set to aws:kms or aws:kms:dsse . + // + // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the + // KMS key. + // // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // // - Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // // - Key Alias: alias/alias-name + // // If you use a key ID, you can run into a LogDestination undeliverable error when - // creating a VPC flow log. If you are using encryption with cross-account or - // Amazon Web Services service operations you must use a fully qualified KMS key - // ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) - // . Amazon S3 only supports symmetric encryption KMS keys. For more information, - // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // creating a VPC flow log. + // + // If you are using encryption with cross-account or Amazon Web Services service + // operations you must use a fully qualified KMS key ARN. For more information, see + // [Using encryption for cross-account operations]. + // + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy + // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html KMSMasterKeyID *string noSmithyDocumentSerde @@ -3178,17 +3773,20 @@ type ServerSideEncryptionRule struct { // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 - // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. 
For more - // information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. + // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. + // + // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide. + // + // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html BucketKeyEnabled *bool noSmithyDocumentSerde } -// The established temporary security credentials of the session. Directory -// buckets - These session credentials are only supported for the authentication -// and authorization of Zonal endpoint APIs on directory buckets. +// The established temporary security credentials of the session. +// +// Directory buckets - These session credentials are only supported for the +// authentication and authorization of Zonal endpoint APIs on directory buckets. type SessionCredentials struct { // A unique identifier that's associated with a secret access key. The access key @@ -3224,7 +3822,9 @@ type SessionCredentials struct { } // To use simple format for S3 keys for log objects, set SimplePrefix to an empty -// object. [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// object. +// +// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] type SimplePrefix struct { noSmithyDocumentSerde } @@ -3240,12 +3840,14 @@ type SourceSelectionCriteria struct { // Amazon S3 doesn't replicate replica modifications by default. In the latest // version of replication configuration (when Filter is specified), you can // specify this element and set the status to Enabled to replicate modifications - // on replicas. If you don't specify the Filter element, Amazon S3 assumes that - // the replication configuration is the earlier version, V1. In the earlier - // version, this element is not allowed + // on replicas. + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, this element + // is not allowed ReplicaModifications *ReplicaModifications - // A container for filter information for the selection of Amazon S3 objects + // A container for filter information for the selection of Amazon S3 objects // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria // in the replication configuration, this element is required. SseKmsEncryptedObjects *SseKmsEncryptedObjects @@ -3363,10 +3965,12 @@ type Tagging struct { noSmithyDocumentSerde } -// Container for granting information. Buckets that use the bucket owner enforced -// setting for Object Ownership don't support target grants. For more information, -// see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. +// Container for granting information. +// +// Buckets that use the bucket owner enforced setting for Object Ownership don't +// support target grants. For more information, see [Permissions server access log delivery]in the Amazon S3 User Guide. +// +// [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general type TargetGrant struct { // Container for the person being granted permissions. @@ -3397,9 +4001,10 @@ type TargetObjectKeyFormat struct { // without additional operational overhead. 
type Tiering struct { - // S3 Intelligent-Tiering access tier. See Storage class for automatically - // optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) - // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3 + // Intelligent-Tiering storage class. + // + // [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access // // This member is required. AccessTier IntelligentTieringAccessTier @@ -3422,8 +4027,9 @@ type Tiering struct { type TopicConfiguration struct { // The Amazon S3 bucket event about which to send notifications. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // information, see [Supported Event Types]in the Amazon S3 User Guide. + // + // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html // // This member is required. Events []Event @@ -3435,8 +4041,9 @@ type TopicConfiguration struct { TopicArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -3447,9 +4054,10 @@ type TopicConfiguration struct { } // Specifies when an object transitions to a specified storage class. For more -// information about Amazon S3 lifecycle configuration rules, see Transitioning -// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) -// in the Amazon S3 User Guide. +// information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle]in the Amazon S3 +// User Guide. +// +// [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html type Transition struct { // Indicates when objects are transitioned to the specified storage class. The @@ -3467,8 +4075,9 @@ type Transition struct { } // Describes the versioning state of an Amazon S3 bucket. For more information, -// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) -// in the Amazon S3 API Reference. +// see [PUT Bucket versioning]in the Amazon S3 API Reference. +// +// [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html type VersioningConfiguration struct { // Specifies whether MFA delete is enabled in the bucket versioning configuration. 
@@ -3491,8 +4100,9 @@ type WebsiteConfiguration struct { // The name of the index document for the website. IndexDocument *IndexDocument - // The redirect behavior for every request to this bucket's website endpoint. If - // you specify this property, you can't specify any other property. + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo // Rules that define when a redirect is applied and the redirect behavior. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 46dea1b50684..03f870f4f8b1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,100 @@ +# v1.22.4 (2024-07-18) + +* No change notes available for this release. + +# v1.22.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.21.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.11 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.9 (2024-05-23) + +* No change notes available for this release. + +# v1.20.8 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.7 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.6 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.20.5 (2024-04-05) + +* No change notes available for this release. + +# v1.20.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.3 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.2 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.19.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. 
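The v1.19.1 note above means an unset region now surfaces as an explicit endpoint-parameter error instead of an obscure hostname lookup failure. A small sketch of the usual way to avoid it, assuming us-east-1 purely for illustration:

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/sso"
)

// newSSOClient builds an SSO client with an explicit region so endpoint
// resolution never sees an empty Region parameter. Illustrative only.
func newSSOClient(ctx context.Context) (*sso.Client, error) {
    cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
    if err != nil {
        return nil, err
    }
    return sso.NewFromConfig(cfg), nil
}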
+ +# v1.19.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.7 (2024-01-18) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index e439699253b3..a06c6e738fce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -30,6 +33,9 @@ const ServiceAPIVersion = "2019-06-10" // Client provides the API client to make operations call for AWS Single Sign-On. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -68,6 +74,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -229,15 +237,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -361,17 +370,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { } func addClientUserAgent(stack *middleware.Stack, options Options) error { - if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)(stack); err != nil { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { return err } + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion) if len(options.AppID) > 0 { - return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) } return nil } +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -390,12 +419,72 @@ func newDefaultV4Signer(o Options) *v4.Signer { }) } -func addRetryMiddlewares(stack *middleware.Stack, o Options) error { - mo := retry.AddRetryMiddlewaresOptions{ - Retryer: o.Retryer, - LogRetryAttempts: o.ClientLogMode.IsRetries(), +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return 
stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + }) + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err } - return retry.AddRetryMiddlewares(stack, mo) + return nil } // resolves dual-stack endpoint configuration @@ -428,12 +517,75 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, 
"OperationDeserializer", middleware.Before) + } func addResponseErrorMiddleware(stack *middleware.Stack) error { - return awshttp.AddResponseErrorMiddleware(stack) + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + } func addRequestResponseLogging(stack *middleware.Stack, o Options) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 436eadc8647e..5ce00b4961b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -30,9 +30,10 @@ func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredenti type GetRoleCredentialsInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -83,22 +84,22 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -113,13 +114,19 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index d81b067701c1..f20e3acbfc9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go 
@@ -29,9 +29,10 @@ func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesI type ListAccountRolesInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. AccessToken *string @@ -88,22 +89,22 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -118,13 +119,19 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { @@ -142,14 +149,6 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack return nil } -// ListAccountRolesAPIClient is a client that implements the ListAccountRoles -// operation. -type ListAccountRolesAPIClient interface { - ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) -} - -var _ ListAccountRolesAPIClient = (*Client)(nil) - // ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles type ListAccountRolesPaginatorOptions struct { // The number of items that clients can request per page. @@ -213,6 +212,9 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...) 
if err != nil { return nil, err @@ -232,6 +234,14 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func return result, nil } +// ListAccountRolesAPIClient is a client that implements the ListAccountRoles +// operation. +type ListAccountRolesAPIClient interface { + ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error) +} + +var _ ListAccountRolesAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index 38f8472ae1ce..391b567db957 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -12,9 +12,10 @@ import ( ) // Lists all AWS accounts assigned to the user. These AWS accounts are assigned by -// the administrator of the account. For more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the IAM Identity Center User Guide. This operation returns a paginated -// response. +// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity +// Center User Guide. This operation returns a paginated response. +// +// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { if params == nil { params = &ListAccountsInput{} @@ -32,9 +33,10 @@ func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, op type ListAccountsInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. 
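The paginator change above (repeated below for ListAccounts) prepends addIsPaginatorUserAgent so each NextPage call tags the request's User-Agent with the paginator feature. Caller-side paginator usage is unchanged; a sketch, with the access token and account ID supplied by the caller:

import (
    "context"
    "fmt"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/sso"
)

// listRoles walks all pages of ListAccountRoles for one account.
// Illustrative only.
func listRoles(ctx context.Context, client *sso.Client, token, accountID string) error {
    p := sso.NewListAccountRolesPaginator(client, &sso.ListAccountRolesInput{
        AccessToken: aws.String(token),
        AccountId:   aws.String(accountID),
    })
    for p.HasMorePages() {
        page, err := p.NextPage(ctx)
        if err != nil {
            return err
        }
        for _, role := range page.RoleList {
            fmt.Println(aws.ToString(role.RoleName))
        }
    }
    return nil
}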
AccessToken *string @@ -86,22 +88,22 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -116,13 +118,19 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpListAccountsValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { @@ -140,13 +148,6 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op return nil } -// ListAccountsAPIClient is a client that implements the ListAccounts operation. -type ListAccountsAPIClient interface { - ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) -} - -var _ ListAccountsAPIClient = (*Client)(nil) - // ListAccountsPaginatorOptions is the paginator options for ListAccounts type ListAccountsPaginatorOptions struct { // This is the number of items clients can request per page. @@ -210,6 +211,9 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op } params.MaxResults = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListAccounts(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -229,6 +233,13 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op return result, nil } +// ListAccountsAPIClient is a client that implements the ListAccounts operation. 
+type ListAccountsAPIClient interface { + ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error) +} + +var _ ListAccountsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index 82e98a894175..456e4a37170c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -12,16 +12,20 @@ import ( // Removes the locally stored SSO tokens from the client-side cache and sends an // API call to the IAM Identity Center service to invalidate the corresponding -// server-side IAM Identity Center sign in session. If a user uses IAM Identity -// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is -// used to obtain an IAM session, as specified in the corresponding IAM Identity -// Center permission set. More specifically, IAM Identity Center assumes an IAM -// role in the target account on behalf of the user, and the corresponding -// temporary AWS credentials are returned to the client. After user logout, any -// existing IAM role sessions that were created by using IAM Identity Center -// permission sets continue based on the duration configured in the permission set. -// For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) -// in the IAM Identity Center User Guide. +// server-side IAM Identity Center sign in session. +// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. More specifically, IAM +// Identity Center assumes an IAM role in the target account on behalf of the user, +// and the corresponding temporary AWS credentials are returned to the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured in +// the permission set. For more information, see [User authentications]in the IAM Identity Center User +// Guide. +// +// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { if params == nil { params = &LogoutInput{} @@ -39,9 +43,10 @@ func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func type LogoutInput struct { - // The token issued by the CreateToken API call. For more information, see - // CreateToken (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the IAM Identity Center OIDC API Reference Guide. + // The token issued by the CreateToken API call. For more information, see [CreateToken] in the + // IAM Identity Center OIDC API Reference Guide. + // + // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html // // This member is required. 
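LogoutInput, shown above, only needs the cached access token. A minimal sketch of invalidating the server-side sign-in session (local token-cache cleanup is out of scope here):

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/sso"
)

// logoutSession invalidates the IAM Identity Center sign-in session behind
// the given access token. Illustrative only.
func logoutSession(ctx context.Context, client *sso.Client, accessToken string) error {
    _, err := client.Logout(ctx, &sso.LogoutInput{
        AccessToken: aws.String(accessToken),
    })
    return err
}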
AccessToken *string @@ -78,22 +83,22 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -108,13 +113,19 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpLogoutValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go index 3b28e825dd02..a93a77cd7fe1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -169,7 +169,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go index 8bba205f4356..d6297fa6a154 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go @@ -13,12 +13,22 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestjson1_deserializeOpGetRoleCredentials struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go index 59456d5dc270..7f6e429fda8a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -6,16 +6,22 @@ // AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web // service that makes it easy for you to assign user access to IAM Identity Center // resources such as the AWS access portal. Users can get AWS account applications -// and roles assigned to them and get federated into the application. Although AWS -// Single Sign-On was renamed, the sso and identitystore API namespaces will -// continue to retain their original name for backward compatibility purposes. For -// more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed) -// . This reference guide describes the IAM Identity Center Portal operations that +// and roles assigned to them and get federated into the application. +// +// Although AWS Single Sign-On was renamed, the sso and identitystore API +// namespaces will continue to retain their original name for backward +// compatibility purposes. For more information, see [IAM Identity Center rename]. +// +// This reference guide describes the IAM Identity Center Portal operations that // you can call programatically and includes detailed information on data types and -// errors. AWS provides SDKs that consist of libraries and sample code for various +// errors. +// +// AWS provides SDKs that consist of libraries and sample code for various // programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. // The SDKs provide a convenient way to create programmatic access to IAM Identity // Center and other AWS services. For more information about the AWS SDKs, -// including how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/) -// . +// including how to download and install them, see [Tools for Amazon Web Services]. 
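As the package documentation above describes, the portal operations exchange an IAM Identity Center access token for short-lived role credentials. A hedged sketch using GetRoleCredentials together with the RoleCredentials type from this module (account, role, and token values are placeholders supplied by the caller):

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/sso"
)

// fetchRoleCredentials trades an access token for temporary role credentials.
// Illustrative only; expiration handling is omitted.
func fetchRoleCredentials(ctx context.Context, client *sso.Client, token, accountID, roleName string) (aws.Credentials, error) {
    out, err := client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
        AccessToken: aws.String(token),
        AccountId:   aws.String(accountID),
        RoleName:    aws.String(roleName),
    })
    if err != nil {
        return aws.Credentials{}, err
    }
    rc := out.RoleCredentials
    return aws.Credentials{
        AccessKeyID:     aws.ToString(rc.AccessKeyId),
        SecretAccessKey: aws.ToString(rc.SecretAccessKey),
        SessionToken:    aws.ToString(rc.SessionToken),
    }, nil
}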
+// +// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/ +// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go index d31380cf2837..75ae283ef86c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -216,6 +216,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + // EndpointParameters provides the parameters that influence how endpoints are // resolved. type EndpointParameters struct { @@ -281,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -458,10 +476,10 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} - params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.Region = bindRegion(options.Region) params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) params.Endpoint = options.BaseEndpoint @@ -488,6 +506,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -497,7 +519,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json index 53060bccf5ee..936253d7cae7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -3,8 +3,7 @@ "github.com/aws/aws-sdk-go-v2": "v1.4.0", "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4" + 
"github.com/aws/smithy-go": "v1.4.0" }, "files": [ "api_client.go", @@ -25,6 +24,7 @@ "options.go", "protocol_test.go", "serializers.go", + "snapshot_test.go", "types/errors.go", "types/types.go", "validators.go" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 857a6af7ee0a..f252ba39e80b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.7" +const goModuleVersion = "1.22.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index c8f7c09e46d6..d522129e768b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -187,6 +187,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-south-1", }, }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{ @@ -211,6 +219,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-3", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{ @@ -219,6 +235,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ca-central-1", }, }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{ @@ -251,6 +275,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-south-1", }, }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go index 5dee7e53f47e..0ba182e976fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. 
To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
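The new AccountIDEndpointMode option above can also be overridden per client rather than only through shared config. A sketch, assuming the caller wants account-ID-based endpoint routing disabled:

import (
    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/sso"
)

// newClient overrides the endpoint mode for a single client; the disabled
// mode skips account-ID-based routing entirely. Illustrative only.
func newClient(cfg aws.Config) *sso.Client {
    return sso.NewFromConfig(cfg, func(o *sso.Options) {
        o.AccountIDEndpointMode = aws.AccountIDEndpointModeDisabled
    })
}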
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go index 8dc02296b11f..07ac468e3184 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go @@ -25,22 +25,24 @@ type AccountInfo struct { type RoleCredentials struct { // The identifier used for the temporary security credentials. For more - // information, see Using Temporary Security Credentials to Request Access to AWS - // Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html AccessKeyId *string // The date on which temporary security credentials expire. Expiration int64 - // The key that is used to sign the request. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html SecretAccessKey *string - // The token used for temporary credentials. For more information, see Using - // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) - // in the AWS IAM User Guide. + // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS + // IAM User Guide. + // + // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html SessionToken *string noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index f77c4785e72e..adf698c80275 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,100 @@ +# v1.26.4 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.3 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.2 (2024-07-03) + +* No change notes available for this release. + +# v1.26.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.25.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.25.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.6 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.5 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.3 (2024-05-23) + +* No change notes available for this release. + +# v1.24.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2024-05-10) + +* **Feature**: Updated request parameters for PKCE support. + +# v1.23.5 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.23.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.3 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.2 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. + +# v1.22.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.22.1 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.22.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.21.7 (2024-01-16) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index fed0897390b9..25cd1c048827 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -14,13 +14,16 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -30,6 +33,9 @@ const ServiceAPIVersion = "2019-06-10" // Client provides the API client to make operations call for AWS SSO OIDC. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. 
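The timeOffset field above backs the "clock skew correction" item in the changelog. The rough idea, not the SDK's internal implementation, is to compare the server's Date header with the local clock so later requests can be signed with a corrected timestamp; a standalone illustration:

import (
    "net/http"
    "time"
)

// clockOffset derives an approximate client/server clock difference from a
// response's Date header. Sketch of the concept only.
func clockOffset(resp *http.Response) (time.Duration, bool) {
    serverTime, err := http.ParseTime(resp.Header.Get("Date"))
    if err != nil {
        return 0, false
    }
    // Positive when the server clock is ahead of the local clock.
    return time.Until(serverTime), true
}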
Provide @@ -68,6 +74,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -229,15 +237,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -361,17 +370,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { } func addClientUserAgent(stack *middleware.Stack, options Options) error { - if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)(stack); err != nil { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { return err } + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion) if len(options.AppID) > 0 { - return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) } return nil } +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -390,12 +419,72 @@ func newDefaultV4Signer(o Options) *v4.Signer { }) } -func addRetryMiddlewares(stack *middleware.Stack, o Options) error { - mo := retry.AddRetryMiddlewaresOptions{ - Retryer: o.Retryer, - LogRetryAttempts: o.ClientLogMode.IsRetries(), +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, 
"ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + }) + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err } - return retry.AddRetryMiddlewares(stack, mo) + return nil } // resolves dual-stack endpoint configuration @@ -428,12 +517,75 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func addRecursionDetection(stack *middleware.Stack) error { + return 
stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before) + } func addResponseErrorMiddleware(stack *middleware.Stack) error { - return awshttp.AddResponseErrorMiddleware(stack) + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + } func addRequestResponseLogging(stack *middleware.Stack, o Options) error { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 42464294141f..8b829188eb2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -32,34 +32,43 @@ func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optF type CreateTokenInput struct { // The unique identifier string for the client or application. This value comes - // from the result of the RegisterClient API. + // from the result of the RegisterClientAPI. // // This member is required. ClientId *string // A secret string generated for the client. This value should come from the - // persisted result of the RegisterClient API. + // persisted result of the RegisterClientAPI. // // This member is required. ClientSecret *string // Supports the following OAuth grant types: Device Code and Refresh Token. // Specify either of the following values, depending on the grant type that you - // want: * Device Code - urn:ietf:params:oauth:grant-type:device_code * Refresh - // Token - refresh_token For information about how to obtain the device code, see - // the StartDeviceAuthorization topic. + // want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + // + // For information about how to obtain the device code, see the StartDeviceAuthorization topic. // // This member is required. GrantType *string // Used only when calling this API for the Authorization Code grant type. The // short-term code is used to identify this authorization request. This grant type - // is currently unsupported for the CreateToken API. + // is currently unsupported for the CreateTokenAPI. Code *string + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + // Used only when calling this API for the Device Code grant type. This short-term // code is used to identify this authorization request. This comes from the result - // of the StartDeviceAuthorization API. + // of the StartDeviceAuthorizationAPI. DeviceCode *string // Used only when calling this API for the Authorization Code grant type. This @@ -69,16 +78,18 @@ type CreateTokenInput struct { // Used only when calling this API for the Refresh Token grant type. This token is // used to refresh short-term tokens, such as the access token, that might expire. 
+ // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // The list of scopes for which authorization is requested. The access token that // is issued is limited to the scopes that are granted. If this value is not // specified, IAM Identity Center authorizes all scopes that are configured for the - // client during the call to RegisterClient . + // client during the call to RegisterClient. Scope []string noSmithyDocumentSerde @@ -86,7 +97,8 @@ type CreateTokenInput struct { type CreateTokenOutput struct { - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. AccessToken *string // Indicates the time in seconds when an access token will expire. @@ -94,18 +106,22 @@ type CreateTokenOutput struct { // The idToken is not implemented or supported. For more information about the // features and limitations of the current IAM Identity Center OIDC implementation, - // see Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . A JSON Web Token (JWT) that identifies who is associated with the issued - // access token. + // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference]. + // + // A JSON Web Token (JWT) that identifies who is associated with the issued access + // token. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html IdToken *string // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // Used to notify the client that the returned token is an access token. 
The @@ -140,22 +156,22 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -170,13 +186,19 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateTokenValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go index ed4b98f76310..af04c251a2a4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -6,15 +6,14 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Creates and returns access and refresh tokens for clients and applications that // are authenticated using IAM entities. The access token can be used to fetch -// short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// short-term credentials for the assigned Amazon Web Services accounts or to +// access application APIs using bearer authentication. func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) { if params == nil { params = &CreateTokenWithIAMInput{} @@ -40,10 +39,15 @@ type CreateTokenWithIAMInput struct { // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. 
Specify one of the following values, depending - // on the grant type that you want: * Authorization Code - authorization_code * - // Refresh Token - refresh_token * JWT Bearer - - // urn:ietf:params:oauth:grant-type:jwt-bearer * Token Exchange - - // urn:ietf:params:oauth:grant-type:token-exchange + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange // // This member is required. GrantType *string @@ -60,6 +64,11 @@ type CreateTokenWithIAMInput struct { // in the Authorization Code GrantOptions for the application. Code *string + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + CodeVerifier *string + // Used only when calling this API for the Authorization Code grant type. This // value specifies the location of the client or application that has registered to // receive the authorization code. @@ -67,16 +76,21 @@ type CreateTokenWithIAMInput struct { // Used only when calling this API for the Refresh Token grant type. This token is // used to refresh short-term tokens, such as the access token, that might expire. + // // For more information about the features and limitations of the current IAM // Identity Center OIDC implementation, see Considerations for Using this Guide in - // the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // Used only when calling this API for the Token Exchange grant type. This value // specifies the type of token that the requester can receive. The following values - // are supported: * Access Token - urn:ietf:params:oauth:token-type:access_token * - // Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + // are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token RequestedTokenType *string // The list of scopes for which authorization is requested. The access token that @@ -95,8 +109,9 @@ type CreateTokenWithIAMInput struct { // Used only when calling this API for the Token Exchange grant type. This value // specifies the type of token that is passed as the subject of the exchange. The - // following value is supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token + // following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token SubjectTokenType *string noSmithyDocumentSerde @@ -104,7 +119,8 @@ type CreateTokenWithIAMInput struct { type CreateTokenWithIAMOutput struct { - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. AccessToken *string // Indicates the time in seconds when an access token will expire. @@ -115,17 +131,21 @@ type CreateTokenWithIAMOutput struct { IdToken *string // Indicates the type of tokens that are issued by IAM Identity Center. 
The - // following values are supported: * Access Token - - // urn:ietf:params:oauth:token-type:access_token * Refresh Token - - // urn:ietf:params:oauth:token-type:refresh_token + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token IssuedTokenType *string // A token that, if present, can be used to refresh a previously issued access - // token that might have expired. For more information about the features and - // limitations of the current IAM Identity Center OIDC implementation, see - // Considerations for Using this Guide in the IAM Identity Center OIDC API - // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html) - // . + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide in + // the [IAM Identity Center OIDC API Reference]. + // + // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html RefreshToken *string // The list of scopes for which authorization is granted. The access token that is @@ -164,25 +184,25 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -197,13 +217,19 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 7aee90491667..d8c766c989e0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -41,6 +41,25 @@ type RegisterClientInput struct { // This member is required. ClientType *string + // This IAM Identity Center application ARN is used to define + // administrator-managed configuration for public client access to resources. At + // authorization, the scopes, grants, and redirect URI available to this client + // will be restricted by this application resource. + EntitledApplicationArn *string + + // The list of OAuth 2.0 grant types that are defined by the client. This list is + // used to restrict the token granting flows available to the client. + GrantTypes []string + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent can + // be redirected back to. + RedirectUris []string + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []string @@ -98,22 +117,22 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -128,13 +147,19 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpRegisterClientValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go index d30349e6b295..7c2b38ba9028 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -30,22 +30,23 @@ func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDevi type 
StartDeviceAuthorizationInput struct { // The unique identifier string for the client that is registered with IAM - // Identity Center. This value should come from the persisted result of the - // RegisterClient API operation. + // Identity Center. This value should come from the persisted result of the RegisterClientAPI + // operation. // // This member is required. ClientId *string // A secret string that is generated for the client. This value should come from - // the persisted result of the RegisterClient API operation. + // the persisted result of the RegisterClientAPI operation. // // This member is required. ClientSecret *string - // The URL for the Amazon Web Services access portal. For more information, see - // Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal] // in the IAM Identity Center User Guide. // + // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html + // // This member is required. StartUrl *string @@ -106,22 +107,22 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -136,13 +137,19 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go index 40b3becb9f28..e6058da813f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, 
_ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -163,7 +163,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go index 76a1160eceb5..05e8c6b7e5f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -13,11 +13,21 @@ import ( smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestjson1_deserializeOpCreateToken struct { } @@ -581,12 +591,18 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response case strings.EqualFold("InvalidClientMetadataException", errorCode): return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + case strings.EqualFold("InvalidRedirectUriException", errorCode): + return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody) + case strings.EqualFold("InvalidRequestException", errorCode): return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) case strings.EqualFold("InvalidScopeException", errorCode): return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -1158,6 +1174,42 @@ func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Res return output } +func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRedirectUriException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidRequestException{} var buff [1024]byte @@ -1717,6 +1769,55 @@ func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGran return nil } +func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRedirectUriException + if *v == nil { + sv = &types.InvalidRedirectUriException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go index 53cd4f55a03d..1d258e5677b8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -6,33 +6,41 @@ // IAM Identity Center OpenID Connect (OIDC) is a web service that enables a // client (such as CLI or a native application) to register with IAM Identity // Center. The service also enables the client to fetch the user’s access token -// upon successful authentication and authorization with IAM Identity Center. IAM -// Identity Center uses the sso and identitystore API namespaces. Considerations -// for Using This Guide Before you begin using this guide, we recommend that you -// first review the following important information about how the IAM Identity -// Center OIDC service works. +// upon successful authentication and authorization with IAM Identity Center. +// +// IAM Identity Center uses the sso and identitystore API namespaces. +// +// # Considerations for Using This Guide +// +// Before you begin using this guide, we recommend that you first review the +// following important information about how the IAM Identity Center OIDC service +// works. 
+// // - The IAM Identity Center OIDC service currently implements only the portions -// of the OAuth 2.0 Device Authorization Grant standard ( -// https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628) ) -// that are necessary to enable single sign-on authentication with the CLI. +// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to +// enable single sign-on authentication with the CLI. +// // - With older versions of the CLI, the service only emits OIDC access tokens, // so to obtain a new token, users must explicitly re-authenticate. To access the // OIDC flow that supports token refresh and doesn’t require re-authentication, // update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with // support for OIDC token refresh and configurable IAM Identity Center session -// durations. For more information, see Configure Amazon Web Services access -// portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html) -// . +// durations. For more information, see [Configure Amazon Web Services access portal session duration]. +// // - The access tokens provided by this service grant access to all Amazon Web // Services account entitlements assigned to an IAM Identity Center user, not just // a particular application. +// // - The documentation in this guide does not describe the mechanism to convert // the access token into Amazon Web Services Auth (“sigv4”) credentials for use // with IAM-protected Amazon Web Services service endpoints. For more information, -// see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) -// in the IAM Identity Center Portal API Reference Guide. +// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity +// Center User Guide. // -// For general information about IAM Identity Center, see What is IAM Identity -// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) -// in the IAM Identity Center User Guide. +// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html +// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html +// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628 +// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go index 85b87089026b..d7099721fe8a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -216,6 +216,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + // EndpointParameters provides the parameters that influence how endpoints are // resolved. 
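The reworked doc.go comments above describe the device authorization grant that the ssooidc client exposes: register a public client, start device authorization against the access portal URL, then exchange the device code for tokens via CreateToken with the urn:ietf:params:oauth:grant-type:device_code grant type. The following is only an illustrative sketch of that flow under assumptions: the RegisterClientInput.ClientName field and the RegisterClient/StartDeviceAuthorization output fields (ClientId, ClientSecret, DeviceCode, VerificationUriComplete) are not shown in these hunks and are assumed here, and the portal URL is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := ssooidc.NewFromConfig(cfg)

	// 1. Register a public client with IAM Identity Center.
	reg, err := client.RegisterClient(ctx, &ssooidc.RegisterClientInput{
		ClientName: aws.String("example-cli"), // assumed field name, not shown in this hunk
		ClientType: aws.String("public"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Start the device authorization flow against the access portal URL.
	da, err := client.StartDeviceAuthorization(ctx, &ssooidc.StartDeviceAuthorizationInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		StartUrl:     aws.String("https://my-sso-portal.awsapps.com/start"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("visit:", aws.ToString(da.VerificationUriComplete)) // assumed output field

	// 3. After the user approves in the browser, exchange the device code for tokens.
	//    A real client polls CreateToken at the server-advised interval instead of sleeping.
	time.Sleep(30 * time.Second)
	tok, err := client.CreateToken(ctx, &ssooidc.CreateTokenInput{
		ClientId:     reg.ClientId,
		ClientSecret: reg.ClientSecret,
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
		DeviceCode:   da.DeviceCode,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("received access token of length", len(aws.ToString(tok.AccessToken)))
}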
type EndpointParameters struct { @@ -281,6 +288,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -458,10 +476,10 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} - params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.Region = bindRegion(options.Region) params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) params.Endpoint = options.BaseEndpoint @@ -488,6 +506,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -497,7 +519,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json index 0a6b34935a27..b2a52633ba66 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -3,8 +3,7 @@ "github.com/aws/aws-sdk-go-v2": "v1.4.0", "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4" + "github.com/aws/smithy-go": "v1.4.0" }, "files": [ "api_client.go", @@ -25,6 +24,7 @@ "options.go", "protocol_test.go", "serializers.go", + "snapshot_test.go", "types/errors.go", "types/types.go", "validators.go" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 474a574ec717..b71407f8dbe5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.21.7" +const goModuleVersion = "1.26.4" diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go index cbd77fd291c4..4a29eaa20b48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -187,6 +187,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-south-1", }, }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{ @@ -211,6 +219,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-3", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{ @@ -219,6 +235,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ca-central-1", }, }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{ @@ -251,6 +275,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-south-1", }, }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go index b964e7e10901..a012e4cb8d9b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. 
If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. 
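As the deprecation note above says, a custom endpoint that used to be wired in through EndpointResolver should now be set through the BaseEndpoint client option (the same options.BaseEndpoint that bindEndpointParams reads in this patch). A minimal sketch, assuming a placeholder endpoint URL:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ssooidc"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// BaseEndpoint replaces custom EndpointResolver implementations; the URL
	// below is a placeholder, not a real service endpoint.
	client := ssooidc.NewFromConfig(cfg, func(o *ssooidc.Options) {
		o.BaseEndpoint = aws.String("https://oidc.example.internal")
	})
	_ = client
}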
func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go index 754218b78e27..04411bd61672 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -95,6 +95,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value ok.String(*v.Code) } + if v.CodeVerifier != nil { + ok := object.Key("codeVerifier") + ok.String(*v.CodeVerifier) + } + if v.DeviceCode != nil { ok := object.Key("deviceCode") ok.String(*v.DeviceCode) @@ -207,6 +212,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithI ok.String(*v.Code) } + if v.CodeVerifier != nil { + ok := object.Key("codeVerifier") + ok.String(*v.CodeVerifier) + } + if v.GrantType != nil { ok := object.Key("grantType") ok.String(*v.GrantType) @@ -324,6 +334,30 @@ func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, ok.String(*v.ClientType) } + if v.EntitledApplicationArn != nil { + ok := object.Key("entitledApplicationArn") + ok.String(*v.EntitledApplicationArn) + } + + if v.GrantTypes != nil { + ok := object.Key("grantTypes") + if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil { + return err + } + } + + if v.IssuerUrl != nil { + ok := object.Key("issuerUrl") + ok.String(*v.IssuerUrl) + } + + if v.RedirectUris != nil { + ok := object.Key("redirectUris") + if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil { + return err + } + } + if v.Scopes != nil { ok := object.Key("scopes") if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { @@ -419,6 +453,28 @@ func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDevic return nil } +func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { array := value.Array() defer array.Close() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go index 86b62049fd90..2cfe7b48fed6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -188,7 +188,7 @@ func (e *InvalidClientMetadataException) ErrorCode() string { func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // Indicates that a request contains an invalid grant. This can occur if a client -// makes a CreateToken request with an invalid grant type. +// makes a CreateTokenrequest with an invalid grant type. type InvalidGrantException struct { Message *string @@ -217,6 +217,36 @@ func (e *InvalidGrantException) ErrorCode() string { } func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// Indicates that one or more redirect URI in the request is not supported for +// this operation. 
+type InvalidRedirectUriException struct { + Message *string + + ErrorCodeOverride *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRedirectUriException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRedirectUriException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRedirectUriException" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // Indicates that something is wrong with the input to the request. For example, a // required parameter might be missing or out of range. type InvalidRequestException struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index f9b6404d1995..f3128d752501 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,101 @@ +# v1.30.3 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.2 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.29.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.29.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.13 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.12 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.11 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.10 (2024-05-23) + +* No change notes available for this release. + +# v1.28.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.7 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + +# v1.28.6 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.5 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.4 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.3 (2024-03-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.2 (2024-03-04) + +* **Bug Fix**: Update internal/presigned-url dependency for corrected API name. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.1 (2024-02-23) + +* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.0 (2024-02-22) + +* **Feature**: Add middleware stack snapshot tests. 
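The v1.28.12 entry below ("Add clock skew correction on all service clients") corresponds to the timeOffset *atomic.Int64 field and AddTimeOffsetMiddleware plumbing added to each client in this patch. The idea is roughly the following; this is an illustrative sketch only, with an assumed helper name, not the SDK's internal middleware:

package main

import (
	"fmt"
	"net/http"
	"sync/atomic"
	"time"
)

// recordTimeOffset compares the server's Date header against the local clock
// and stores the difference, so later requests can compensate for clock skew.
func recordTimeOffset(resp *http.Response, offset *atomic.Int64) {
	serverTime, err := http.ParseTime(resp.Header.Get("Date"))
	if err != nil {
		return // no usable Date header; leave the stored offset unchanged
	}
	offset.Store(int64(time.Since(serverTime)))
}

func main() {
	var offset atomic.Int64
	// Simulate a response from a server whose clock is two minutes behind.
	resp := &http.Response{Header: http.Header{
		"Date": []string{time.Now().Add(-2 * time.Minute).UTC().Format(http.TimeFormat)},
	}}
	recordTimeOffset(resp, &offset)
	fmt.Printf("estimated clock skew: %s\n", time.Duration(offset.Load()))
}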
+ +# v1.27.2 (2024-02-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.27.1 (2024-02-20) + +* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure. + +# v1.27.0 (2024-02-13) + +* **Feature**: Bump minimum Go version to 1.20 per our language support policy. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.26.7 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 369de83b8bc7..acd2b8e7a13a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -15,15 +15,18 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) @@ -34,6 +37,9 @@ const ServiceAPIVersion = "2011-06-15" // Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -72,6 +78,8 @@ func New(options Options, optFns ...func(*Options)) *Client { options: options, } + initializeTimeOffsetResolver(client) + return client } @@ -233,15 +241,16 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. 
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + AccountIDEndpointMode: cfg.AccountIDEndpointMode, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -365,17 +374,37 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { } func addClientUserAgent(stack *middleware.Stack, options Options) error { - if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)(stack); err != nil { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { return err } + ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion) if len(options.AppID) > 0 { - return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID) } return nil } +func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) { + id := (*awsmiddleware.RequestUserAgent)(nil).ID() + mw, ok := stack.Build.Get(id) + if !ok { + mw = awsmiddleware.NewRequestUserAgent() + if err := stack.Build.Add(mw, middleware.After); err != nil { + return nil, err + } + } + + ua, ok := mw.(*awsmiddleware.RequestUserAgent) + if !ok { + return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id) + } + + return ua, nil +} + type HTTPSignerV4 interface { SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error } @@ -394,12 +423,72 @@ func newDefaultV4Signer(o Options) *v4.Signer { }) } -func addRetryMiddlewares(stack *middleware.Stack, o Options) error { - mo := retry.AddRetryMiddlewaresOptions{ - Retryer: o.Retryer, - LogRetryAttempts: o.ClientLogMode.IsRetries(), +func addClientRequestID(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After) +} + +func addComputeContentLength(stack *middleware.Stack) error { + return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After) +} + +func addRawResponseToMetadata(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before) +} + +func addRecordResponseTiming(stack *middleware.Stack) error { + return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) +} +func addStreamingEventsPayload(stack *middleware.Stack) error { + return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) +} + +func addUnsignedPayload(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After) +} + +func addComputePayloadSHA256(stack *middleware.Stack) error { + return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After) +} + +func addContentSHA256Header(stack *middleware.Stack) error { + return 
stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) +} + +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + +func addRetry(stack *middleware.Stack, o Options) error { + attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { + m.LogAttempts = o.ClientLogMode.IsRetries() + }) + if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + return err + } + if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { + return err } - return retry.AddRetryMiddlewares(stack, mo) + return nil } // resolves dual-stack endpoint configuration @@ -432,12 +521,75 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error { + switch mode { + case aws.AccountIDEndpointModeUnset: + case aws.AccountIDEndpointModePreferred: + case aws.AccountIDEndpointModeDisabled: + case aws.AccountIDEndpointModeRequired: + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok { + return fmt.Errorf("accountID is required but not set") + } else if ca.Credentials.AccountID == "" { + return fmt.Errorf("accountID is required but not set") + } + // default check in case invalid mode is configured through request config + default: + return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode) + } + + return nil +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func addRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After) +} + func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { - return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) + return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, 
"OperationDeserializer", middleware.Before) + } func addResponseErrorMiddleware(stack *middleware.Stack) error { - return awshttp.AddResponseErrorMiddleware(stack) + return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before) + } // HTTPPresignerV4 represents presigner interface used by presign url client @@ -581,7 +733,7 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op if err != nil { return err } - err = presignedurlcust.AddAsIsPresigingMiddleware(stack) + err = presignedurlcust.AddAsIsPresigningMiddleware(stack) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go index 2938dac8e38b..e74fc8ba9f7a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go @@ -16,69 +16,99 @@ import ( // Amazon Web Services resources. These temporary credentials consist of an access // key ID, a secret access key, and a security token. Typically, you use AssumeRole // within your account or for cross-account access. For a comparison of AssumeRole -// with other API operations that produce temporary credentials, see Requesting -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRole can be used to make API calls to any Amazon Web Services service -// with the following exception: You cannot call the Amazon Web Services STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. When you create a role, you create two policies: a role -// trust policy that specifies who can assume the role, and a permissions policy -// that specifies what can be done with the role. You specify the trusted principal -// that is allowed to assume the role in the role trust policy. To assume a role -// from a different account, your Amazon Web Services account must be trusted by -// the role. The trust relationship is defined in the role's trust policy when the -// role is created. 
That trust policy states which accounts are allowed to delegate -// that access to users in the account. A user who wants to access a role in a -// different account must also have permissions that are delegated from the account -// administrator. The administrator must attach a policy that allows the user to -// call AssumeRole for the ARN of the role in the other account. To allow a user -// to assume a role in the same account, you can do either of the following: +// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the +// IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any Amazon Web Services service with the following exception: You +// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies what +// can be done with the role. You specify the trusted principal that is allowed to +// assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have +// permissions that are delegated from the account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. +// +// To allow a user to assume a role in the same account, you can do either of the +// following: +// // - Attach a policy to the user that allows the user to call AssumeRole (as long // as the role's trust policy trusts the account). +// // - Add the user as a principal directly in the role's trust policy. // // You can do either because the role’s trust policy acts as an IAM resource-based // policy. When a resource-based policy grants access to a principal in the same // account, no additional identity-based policy is required. For more information -// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// in the IAM User Guide. 
Tags (Optional) You can pass tag key-value pairs to your -// session. These tags are called session tags. For more information about session -// tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include -// multi-factor authentication (MFA) information when you call AssumeRole . This is -// useful for cross-account scenarios to ensure that the user that assumes the role -// has been authenticated with an Amazon Web Services MFA device. In that scenario, -// the trust policy of the role being assumed includes a condition that tests for -// MFA authentication. If the caller does not include valid MFA information, the -// request to assume the role is denied. The condition in a trust policy that tests -// for MFA authentication might look like the following example. "Condition": -// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see -// Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) -// in the IAM User Guide guide. To use MFA with AssumeRole , you pass values for -// the SerialNumber and TokenCode parameters. The SerialNumber value identifies -// the user's hardware or virtual MFA device. The TokenCode is the time-based -// one-time password (TOTP) that the MFA device produces. +// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM +// User Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Using MFA with AssumeRole +// +// (Optional) You can include multi-factor authentication (MFA) information when +// you call AssumeRole . This is useful for cross-account scenarios to ensure that +// the user that assumes the role has been authenticated with an Amazon Web +// Services MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication. If the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication might +// look like the following example. 
+// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide. +// +// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that the +// MFA device produces. +// +// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) { if params == nil { params = &AssumeRoleInput{} @@ -101,17 +131,19 @@ type AssumeRoleInput struct { // This member is required. RoleArn *string - // An identifier for the assumed role session. Use the role session name to - // uniquely identify a session when the same role is assumed by different - // principals or for different reasons. In cross-account scenarios, the role - // session name is visible to, and can be logged by the account that owns the role. - // The role session name is also used in the ARN of the assumed role principal. - // This means that subsequent cross-account API requests that use the temporary - // security credentials will expose the role session name to the external account - // in their CloudTrail logs. The regex used to validate this parameter is a string - // of characters consisting of upper- and lower-case alphanumeric characters with - // no spaces. You can also include underscores or any of the following characters: - // =,.@- + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role is + // assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the account + // that owns the role. The role session name is also used in the ARN of the assumed + // role principal. This means that subsequent cross-account API requests that use + // the temporary security credentials will expose the role session name to the + // external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- // // This member is required. 
RoleSessionName *string @@ -122,23 +154,27 @@ type AssumeRoleInput struct { // hours. If you specify a value higher than this setting or the administrator // setting (whichever is lower), the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web - // Services CLI or Amazon Web Services API role session to a maximum of one hour. - // When you use the AssumeRole API operation to assume a role, you can specify the - // duration of your role session with the DurationSeconds parameter. You can - // specify a parameter value of up to 43200 seconds (12 hours), depending on the - // maximum session duration setting for your role. However, if you assume a role - // using role chaining and provide a DurationSeconds parameter value greater than - // one hour, the operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API + // role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of up to + // 43200 seconds (12 hours), depending on the maximum session duration setting for + // your role. However, if you assume a role using role chaining and provide a + // DurationSeconds parameter value greater than one hour, the operation fails. To + // learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. 
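To make the DurationSeconds behaviour described above concrete, here is a minimal, illustrative sketch of calling AssumeRole through this client. It is not part of the vendored change; the role ARN, session name, and one-hour duration are placeholder values, and the shared-config loading assumes credentials and a region are available in the environment.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	// Load shared configuration (region, credentials) from the environment.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// Placeholder role ARN and session name; DurationSeconds stays within the
	// one-hour limit that applies when the call is part of a role chain.
	out, err := client.AssumeRole(context.TODO(), &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int32(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.ToString(out.Credentials.AccessKeyId))
}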
+ // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // A unique identifier that might be required when you assume a role in another @@ -149,63 +185,79 @@ type AssumeRoleInput struct { // the administrator of the trusting account might send an external ID to the // administrator of the trusted account. That way, only someone with the ID can // assume the role, rather than everyone in the account. For more information about - // the external ID, see How to Use an External ID When Granting Access to Your - // Amazon Web Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@:/- + // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@:/- + // + // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html ExternalId *string // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. 
For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. 
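As a companion to the session-policy intersection described above, the following sketch (again not part of the patch) narrows an assumed session with an inline Policy document and a managed policy ARN. The inline JSON, the role ARN, and the ReadOnlyAccess ARN are example values only.

package stsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

// assumeReadOnly narrows the session with an inline policy and a managed
// policy ARN; the effective permissions are the intersection of the role's
// identity-based policy and these session policies.
func assumeReadOnly(ctx context.Context, client *sts.Client) (*sts.AssumeRoleOutput, error) {
	// Example inline policy; it counts against the 2,048-character plaintext limit.
	inline := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`
	return client.AssumeRole(ctx, &sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		Policy:          aws.String(inline),
		PolicyArns: []types.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
		},
	})
}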
+ // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // A list of previously acquired trusted context assertions in the format of a // JSON array. The trusted context assertion is signed and encrypted by Amazon Web - // Services STS. The following is an example of a ProvidedContext value that - // includes a single trusted context assertion and the ARN of the context provider - // from which the trusted context assertion was generated. - // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + // Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which the + // trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] ProvidedContexts []types.ProvidedContext // The identification number of the MFA device that is associated with the user @@ -213,79 +265,97 @@ type AssumeRoleInput struct { // the role being assumed includes a condition that requires MFA authentication. // The value is either the serial number for a hardware device (such as // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as - // arn:aws:iam::123456789012:mfa/user ). The regex used to validate this parameter - // is a string of characters consisting of upper- and lower-case alphanumeric - // characters with no spaces. You can also include underscores or any of the - // following characters: =,.@- + // arn:aws:iam::123456789012:mfa/user ). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- SerialNumber *string // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. 
The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@-. You cannot use a value that begins with the text aws: . This prefix is - // reserved for Amazon Web Services internal use. + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@-. You cannot use a + // value that begins with the text aws: . This prefix is reserved for Amazon Web + // Services internal use. + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // A list of session tags that you want to pass. Each session tag consists of a - // key name and an associated value. For more information about session tags, see - // Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters, and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the role. When you do, session - // tags override a role tag with the same key. Tag key–value pairs are not case - // sensitive, but case is preserved. This means that you cannot have separate - // Department and department tag keys. Assume that the role has the Department = - // Marketing tag and you pass the department = engineering session tag. Department - // and department are not saved as separate tags, and the session tag passed in - // the request takes precedence over the role tag. Additionally, if you used - // temporary credentials to perform this operation, the new session inherits any - // transitive session tags from the calling session. If you pass a session tag with - // the same key as an inherited tag, the operation fails. To view the inherited - // tags for a session, see the CloudTrail logs. 
For more information, see Viewing - // Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions] // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // Additionally, if you used temporary credentials to perform this operation, the + // new session inherits any transitive session tags from the calling session. If + // you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the CloudTrail logs. For + // more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide. + // + // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length + // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs Tags []types.Tag // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA. (In other words, if the policy includes a condition that // tests for MFA). If the role being assumed requires MFA and if the TokenCode // value is missing or expired, the AssumeRole call returns an "access denied" - // error. The format for this parameter, as described by its regex pattern, is a - // sequence of six numeric digits. + // error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. TokenCode *string // A list of keys for session tags that you want to set as transitive. If you set // a tag key as transitive, the corresponding key and value passes to subsequent - // sessions in a role chain. For more information, see Chaining Roles with Session - // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) - // in the IAM User Guide. This parameter is optional. 
When you set session tags as - // transitive, the session policy and session tags packed binary limit is not - // affected. If you choose not to specify a transitive tag key, then no tags are - // passed from this session to any subsequent sessions. + // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed from + // this session to any subsequent sessions. + // + // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining TransitiveTagKeys []string noSmithyDocumentSerde } -// Contains the response to a successful AssumeRole request, including temporary -// Amazon Web Services credentials that can be used to make Amazon Web Services -// requests. +// Contains the response to a successful AssumeRole request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleOutput struct { // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers @@ -296,9 +366,10 @@ type AssumeRoleOutput struct { AssumedRoleUser *types.AssumedRoleUser // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // A percentage value that indicates the packed size of the session policies and @@ -308,17 +379,21 @@ type AssumeRoleOutput struct { PackedPolicySize *int32 // The source identity specified by the principal that is calling the AssumeRole - // operation. You can require users to specify a source identity when they assume a - // role. You do this by using the sts:SourceIdentity condition key in a role trust - // policy. You can use source identity information in CloudTrail logs to determine - // who took actions with a role. You can use the aws:SourceIdentity condition key - // to further control access to Amazon Web Services resources based on the value of - // source identity. For more information about using source identity, see Monitor - // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // operation. + // + // You can require users to specify a source identity when they assume a role. You + // do this by using the sts:SourceIdentity condition key in a role trust policy. + // You can use source identity information in CloudTrail logs to determine who took + // actions with a role. 
You can use the aws:SourceIdentity condition key to + // further control access to Amazon Web Services resources based on the value of + // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the + // IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // Metadata pertaining to the operation's result. @@ -349,25 +424,25 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -382,13 +457,19 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go index ef576b6407df..4c685abd5f71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go @@ -16,92 +16,132 @@ import ( // mechanism for tying an enterprise identity store or directory to role-based // Amazon Web Services access without user-specific credentials or configuration. 
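The per-operation wiring above (addClientRequestID, addRetry, addTimeOffsetBuild, addUserAgentRetryMode, addRecursionDetection, and friends) all mutates the same smithy middleware stack that callers can reach through Options.APIOptions. The sketch below shows that extension point in the same append style the generated helpers use; the hook function and its name are illustrative assumptions, not part of the vendored change.

package stsexamples

import (
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/smithy-go/middleware"
)

// newClientWithStackHook builds an STS client whose operations run one extra
// stack mutator, appended the same way helpers such as addIsWaiterUserAgent
// append theirs.
func newClientWithStackHook(cfg aws.Config, hook func(*middleware.Stack) error) *sts.Client {
	return sts.NewFromConfig(cfg, func(o *sts.Options) {
		o.APIOptions = append(o.APIOptions, hook)
	})
}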
// For a comparison of AssumeRoleWithSAML with the other API operations that -// produce temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this -// operation consist of an access key ID, a secret access key, and a security -// token. Applications can use these temporary security credentials to sign calls -// to Amazon Web Services services. Session Duration By default, the temporary -// security credentials created by AssumeRoleWithSAML last for one hour. However, -// you can use the optional DurationSeconds parameter to specify the duration of -// your session. Your role session lasts for the duration that you specify, or -// until the time specified in the SAML authentication response's -// SessionNotOnOrAfter value, whichever is shorter. You can provide a -// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session -// duration setting for the role. This setting can have a value from 1 hour to 12 -// hours. To learn how to view the maximum value for your role, see View the -// Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) -// limits your CLI or Amazon Web Services API role session to a maximum of one +// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of an +// access key ID, a secret access key, and a security token. Applications can use +// these temporary security credentials to sign calls to Amazon Web Services +// services. +// +// # Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML +// authentication response's SessionNotOnOrAfter value, whichever is shorter. You +// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the +// maximum session duration setting for the role. This setting can have a value +// from 1 hour to 12 hours. To learn how to view the maximum value for your role, +// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console URL. 
+// For more information, see [Using IAM Roles]in the IAM User Guide. +// +// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one // hour. When you use the AssumeRole API operation to assume a role, you can // specify the duration of your role session with the DurationSeconds parameter. // You can specify a parameter value of up to 43200 seconds (12 hours), depending // on the maximum session duration setting for your role. However, if you assume a // role using role chaining and provide a DurationSeconds parameter value greater -// than one hour, the operation fails. Permissions The temporary security -// credentials created by AssumeRoleWithSAML can be used to make API calls to any -// Amazon Web Services service with the following exception: you cannot call the -// STS GetFederationToken or GetSessionToken API operations. (Optional) You can -// pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of -// Amazon Web Services security credentials. The identity of the caller is -// validated by using keys in the metadata document that is uploaded for the SAML -// provider entity for your identity provider. Calling AssumeRoleWithSAML can -// result in an entry in your CloudTrail logs. The entry includes the value in the -// NameID element of the SAML assertion. We recommend that you use a NameIDType -// that is not associated with any personally identifiable information (PII). For -// example, you could instead use the persistent identifier ( -// urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). Tags (Optional) You can -// configure your IdP to pass attributes into your SAML assertion as session tags. -// Each session tag consists of a key name and an associated value. For more -// information about session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. 
An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, session tags -// override the role's tags with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. SAML Configuration Before your application can call -// AssumeRoleWithSAML , you must configure your SAML identity provider (IdP) to -// issue the claims required by Amazon Web Services. Additionally, you must use -// Identity and Access Management (IAM) to create a SAML provider entity in your -// Amazon Web Services account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. +// than one hour, the operation fails. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used to +// make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. The identity of the caller is validated by using keys in +// the metadata document that is uploaded for the SAML provider entity for your +// identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The +// entry includes the value in the NameID element of the SAML assertion. 
We +// recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the +// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ). +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML +// assertion as session tags. Each session tag consists of a key name and an +// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed +// 128 characters and the values can’t exceed 256 characters. For these and +// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the upper +// size limit. +// +// You can pass a session tag with the same key as a tag that is attached to the +// role. When you do, session tags override the role's tags with the same key. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # SAML Configuration +// +// Before your application can call AssumeRoleWithSAML , you must configure your +// SAML identity provider (IdP) to issue the claims required by Amazon Web +// Services. Additionally, you must use Identity and Access Management (IAM) to +// create a SAML provider entity in your Amazon Web Services account that +// represents your identity provider. You must also create an IAM role that +// specifies this SAML provider in its trust policy. +// // For more information, see the following resources: -// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. -// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. -// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. -// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// +// [About SAML 2.0-based Federation] +// - in the IAM User Guide. +// +// [Creating SAML Identity Providers] +// - in the IAM User Guide. +// +// [Configuring a Relying Party and Claims] +// - in the IAM User Guide. +// +// [Creating a Role for SAML 2.0 Federation] +// - in the IAM User Guide. 
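Tying the SAML configuration notes above together, this is a minimal sketch of exchanging a base64-encoded SAML assertion for temporary credentials. The ARNs are placeholders, the assertion value comes from the caller's IdP, and PrincipalArn is taken from the full AssumeRoleWithSAMLInput definition rather than the excerpt shown here; as noted above, the call itself does not require Amazon Web Services credentials.

package stsexamples

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

// assumeWithSAML trades a base64-encoded SAML assertion issued by the IdP for
// temporary credentials. AssumeRoleWithSAML is validated against the SAML
// provider's metadata, so the client needs no credential provider of its own.
func assumeWithSAML(ctx context.Context, client *sts.Client, samlAssertion string) (*sts.AssumeRoleWithSAMLOutput, error) {
	return client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
		// Placeholder ARNs for the role to assume and the IAM SAML provider
		// entity that represents the identity provider.
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/example-saml-role"),
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
		SAMLAssertion: aws.String(samlAssertion),
	})
}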
+// +// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session +// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html +// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining +// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) { if params == nil { params = &AssumeRoleWithSAMLInput{} @@ -130,9 +170,11 @@ type AssumeRoleWithSAMLInput struct { // This member is required. RoleArn *string - // The base64 encoded SAML authentication response provided by the IdP. For more - // information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the IAM User Guide. + // The base64 encoded SAML authentication response provided by the IdP. + // + // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide. + // + // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html // // This member is required. SAMLAssertion *string @@ -146,92 +188,114 @@ type AssumeRoleWithSAMLInput struct { // than this setting, the operation fails. For example, if you specify a session // duration of 12 hours, but your administrator set the maximum session duration to // 6 hours, your operation fails. To learn how to view the maximum value for your - // role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. 
The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. + // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). 
It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. 
You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType noSmithyDocumentSerde } -// Contains the response to a successful AssumeRoleWithSAML request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleWithSAMLOutput struct { // The identifiers for the temporary security credentials that the operation // returns. AssumedRoleUser *types.AssumedRoleUser - // The value of the Recipient attribute of the SubjectConfirmationData element of + // The value of the Recipient attribute of the SubjectConfirmationData element of // the SAML assertion. Audience *string // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // The value of the Issuer element of the SAML assertion. Issuer *string // A hash value based on the concatenation of the following: + // // - The Issuer response value. + // // - The Amazon Web Services account ID. + // // - The friendly name (the last part of the ARN) of the SAML provider in IAM. + // // The combination of NameQualifier and Subject can be used to uniquely identify a - // user. The following pseudocode shows how the hash value is calculated: BASE64 ( - // SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) + // user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) ) NameQualifier *string // A percentage value that indicates the packed size of the session policies and @@ -240,31 +304,36 @@ type AssumeRoleWithSAMLOutput struct { // allowed space. PackedPolicySize *int32 - // The value in the SourceIdentity attribute in the SAML assertion. You can - // require users to set a source identity value when they assume a role. You do - // this by using the sts:SourceIdentity condition key in a role trust policy. That - // way, actions that are taken with the role are associated with that user. After - // the source identity is set, the value cannot be changed. It is present in the - // request for all actions that are taken by the role and persists across chained - // role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. 
You can configure your SAML identity provider to use an attribute - // associated with your users, like user name or email, as the source identity when - // calling AssumeRoleWithSAML . You do this by adding an attribute to the SAML - // assertion. For more information about using source identity, see Monitor and - // control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your SAML identity provider to use an + // attribute associated with your users, like user name or email, as the source + // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to + // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in + // the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html SourceIdentity *string // The value of the NameID element in the Subject element of the SAML assertion. Subject *string - // The format of the name ID, as defined by the Format attribute in the NameID + // The format of the name ID, as defined by the Format attribute in the NameID // element of the SAML assertion. Typical examples of the format are transient or - // persistent . If the format includes the prefix - // urn:oasis:names:tc:SAML:2.0:nameid-format , that prefix is removed. For example, + // persistent . + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format , + // that prefix is removed. For example, // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient . // If the format includes any other prefix, the format is returned with no // modifications. 
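For orientation, a minimal usage sketch of the operation documented above, assuming the standard aws-sdk-go-v2 STS client; the ARNs and the base64 SAML assertion are placeholders, not values taken from this patch. The web identity variant (AssumeRoleWithWebIdentity) follows the same shape, with WebIdentityToken and RoleSessionName in place of the SAML assertion.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// AssumeRoleWithSAML does not sign the request with AWS credentials, but the
	// shared config still supplies the region and HTTP client settings.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.AssumeRoleWithSAML(ctx, &sts.AssumeRoleWithSAMLInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/ExampleSAMLRole"),     // placeholder
		PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"), // placeholder
		SAMLAssertion:   aws.String("<base64-encoded SAML response from the IdP>"),        // placeholder
		DurationSeconds: aws.Int32(3600),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Credentials holds the temporary access key ID, secret access key, and
	// session token described in the comments above.
	fmt.Println("access key:", aws.ToString(out.Credentials.AccessKeyId))
	fmt.Println("expires at:", out.Credentials.Expiration)
}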
@@ -298,22 +367,22 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -328,13 +397,19 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go index b2f126b1d0d2..0b5e5a377c2f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go @@ -14,105 +14,143 @@ import ( // Returns a set of temporary security credentials for users who have been // authenticated in a mobile or web application with a web identity provider. // Example providers include the OAuth 2.0 providers Login with Amazon and -// Facebook, or any OpenID Connect-compatible identity provider such as Google or -// Amazon Cognito federated identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// . For mobile applications, we recommend that you use Amazon Cognito. You can use -// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and the Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// to uniquely identify a user. You can also supply the user with a consistent -// identity throughout the lifetime of an application. To learn more about Amazon -// Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) -// in Amazon Cognito Developer Guide. Calling AssumeRoleWithWebIdentity does not -// require the use of Amazon Web Services security credentials. 
Therefore, you can -// distribute an application (for example, on mobile devices) that requests -// temporary security credentials without including long-term Amazon Web Services -// credentials in the application. You also don't need to deploy server-based proxy -// services that use long-term Amazon Web Services credentials. Instead, the -// identity of the caller is validated by using a token from the web identity -// provider. For a comparison of AssumeRoleWithWebIdentity with the other API -// operations that produce temporary credentials, see Requesting Temporary -// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. The temporary security credentials returned by this API -// consist of an access key ID, a secret access key, and a security token. -// Applications can use these temporary security credentials to sign calls to -// Amazon Web Services service API operations. Session Duration By default, the -// temporary security credentials created by AssumeRoleWithWebIdentity last for -// one hour. However, you can use the optional DurationSeconds parameter to -// specify the duration of your session. You can provide a value from 900 seconds -// (15 minutes) up to the maximum session duration setting for the role. This -// setting can have a value from 1 hour to 12 hours. To learn how to view the -// maximum value for your role, see View the Maximum Session Duration Setting for -// a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you use -// the AssumeRole* API operations or the assume-role* CLI commands. However the -// limit does not apply when you use those operations to create a console URL. For -// more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. Permissions The temporary security credentials created by -// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web -// Services service with the following exception: you cannot call the STS -// GetFederationToken or GetSessionToken API operations. (Optional) You can pass -// inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. -// Passing policies to this operation returns new temporary credentials. The -// resulting session's permissions are the intersection of the role's -// identity-based policy and the session policies. You can use the role's temporary -// credentials in subsequent Amazon Web Services API calls to access resources in -// the account that owns the role. You cannot use session policies to grant more -// permissions than those allowed by the identity-based policy of the role that is -// being assumed. 
For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass -// attributes into your web identity token as session tags. Each session tag -// consists of a key name and an associated value. For more information about -// session tags, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can pass up to 50 session tags. The plaintext session -// tag keys can’t exceed 128 characters and the values can’t exceed 256 characters. -// For these and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) -// in the IAM User Guide. An Amazon Web Services conversion compresses the passed -// inline session policy, managed policy ARNs, and session tags into a packed -// binary format that has a separate limit. Your request can fail for this limit -// even if your plaintext meets the other requirements. The PackedPolicySize -// response element indicates by percentage how close the policies and tags for -// your request are to the upper size limit. You can pass a session tag with the -// same key as a tag that is attached to the role. When you do, the session tag -// overrides the role tag with the same key. An administrator must grant you the -// permissions necessary to pass session tags. The administrator can also create -// granular permissions to allow you to pass only specific session tags. For more -// information, see Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. You can set the session tags as transitive. Transitive -// tags persist during role chaining. For more information, see Chaining Roles -// with Session Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) -// in the IAM User Guide. Identities Before your application can call -// AssumeRoleWithWebIdentity , you must have an identity token from a supported -// identity provider and create a role that the application can assume. The role -// that your application assumes must trust the identity provider that is -// associated with the identity token. In other words, the identity provider must -// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can -// result in an entry in your CloudTrail logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided web identity token. We recommend that you avoid using any -// personally identifiable information (PII) in this field. For example, you could -// instead use a GUID or a pairwise identifier, as suggested in the OIDC -// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes) -// . For more information about how to use web identity federation and the +// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities]. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can use +// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. 
You can also +// supply the user with a consistent identity throughout the lifetime of an +// application. +// +// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web +// Services security credentials. Therefore, you can distribute an application (for +// example, on mobile devices) that requests temporary security credentials without +// including long-term Amazon Web Services credentials in the application. You also +// don't need to deploy server-based proxy services that use long-term Amazon Web +// Services credentials. Instead, the identity of the caller is validated by using +// a token from the web identity provider. For a comparison of +// AssumeRoleWithWebIdentity with the other API operations that produce temporary +// credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to Amazon Web Services service API +// operations. +// +// # Session Duration +// +// By default, the temporary security credentials created by +// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional +// DurationSeconds parameter to specify the duration of your session. You can +// provide a value from 900 seconds (15 minutes) up to the maximum session duration +// setting for the role. This setting can have a value from 1 hour to 12 hours. To +// learn how to view the maximum value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. +// The maximum session duration limit applies when you use the AssumeRole* API +// operations or the assume-role* CLI commands. However the limit does not apply +// when you use those operations to create a console URL. For more information, see +// [Using IAM Roles]in the IAM User Guide. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithWebIdentity can be +// used to make API calls to any Amazon Web Services service with the following +// exception: you cannot call the STS GetFederationToken or GetSessionToken API +// operations. +// +// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a +// single JSON policy document to use as an inline session policy. You can also +// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed +// session policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. Passing policies to this operation +// returns new temporary credentials. The resulting session's permissions are the +// intersection of the role's identity-based policy and the session policies. You +// can use the role's temporary credentials in subsequent Amazon Web Services API +// calls to access resources in the account that owns the role. You cannot use +// session policies to grant more permissions than those allowed by the +// identity-based policy of the role that is being assumed. For more information, +// see [Session Policies]in the IAM User Guide. +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. 
Each session tag consists of a key name and an associated +// value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed +// 128 characters and the values can’t exceed 256 characters. For these and +// additional limits, see [IAM and STS Character Limits]in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has a +// separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the upper +// size limit. +// +// You can pass a session tag with the same key as a tag that is attached to the +// role. When you do, the session tag overrides the role tag with the same key. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during role +// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide. +// +// # Identities +// +// Before your application can call AssumeRoleWithWebIdentity , you must have an +// identity token from a supported identity provider and create a role that the +// application can assume. The role that your application assumes must trust the +// identity provider that is associated with the identity token. In other words, +// the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail +// logs. The entry includes the [Subject]of the provided web identity token. We recommend +// that you avoid using any personally identifiable information (PII) in this +// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification]. +// +// For more information about how to use web identity federation and the // AssumeRoleWithWebIdentity API, see the following resources: -// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// . -// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/) -// . Walk through the process of authenticating through Login with Amazon, +// +// [Using Web Identity Federation API Operations for Mobile Apps] +// - and [Federation Through a Web-based Identity Provider]. +// +// [Web Identity Federation Playground] +// - . Walk through the process of authenticating through Login with Amazon, // Facebook, or Google, getting temporary security credentials, and then using // those credentials to make a request to Amazon Web Services. 
-// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) -// . These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these +// +// [Amazon Web Services SDK for iOS Developer Guide] +// - and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the +// identity providers. The toolkits then show how to use the information from these // providers to get and use temporary security credentials. -// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications) -// . This article discusses web identity federation and shows an example of how to -// use web identity federation to get access to content in Amazon S3. +// +// [Web Identity Federation with Mobile Applications] +// - . This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon S3. +// +// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/ +// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session +// [Web Identity Federation Playground]: https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/ +// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/ +// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html +// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining +// [Web Identity Federation with Mobile Applications]: http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications +// [Using Web Identity Federation API Operations for Mobile Apps]: 
https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html +// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) { if params == nil { params = &AssumeRoleWithWebIdentityInput{} @@ -139,10 +177,11 @@ type AssumeRoleWithWebIdentityInput struct { // identifier that is associated with the user who is using your application. That // way, the temporary security credentials that your application will use are // associated with that user. This session name is included as part of the ARN and - // assumed role ID in the AssumedRoleUser response element. The regex used to - // validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@- + // assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- // // This member is required. RoleSessionName *string @@ -162,73 +201,90 @@ type AssumeRoleWithWebIdentityInput struct { // higher than this setting, the operation fails. For example, if you specify a // session duration of 12 hours, but your administrator set the maximum session // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) - // in the IAM User Guide. By default, the value is set to 3600 seconds. The - // DurationSeconds parameter is separate from the duration of a console session - // that you might request using the returned credentials. The request to the - // federation endpoint for a console sign-in token takes a SessionDuration + // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request to + // the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more - // information, see Creating a URL that Enables Federated Users to Access the - // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) - // in the IAM User Guide. + // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide. 
+ // + // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session + // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. + // // This parameter is optional. Passing policies to this operation returns new // temporary credentials. The resulting session's permissions are the intersection // of the role's identity-based policy and the session policies. You can use the // role's temporary credentials in subsequent Amazon Web Services API calls to // access resources in the account that owns the role. You cannot use session // policies to grant more permissions than those allowed by the identity-based - // policy of the role that is being assumed. For more information, see Session - // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The plaintext that you use for both inline and managed - // session policies can't exceed 2,048 characters. The JSON policy characters can - // be any ASCII character from the space character to the end of the valid - // character list (\u0020 through \u00FF). It can also include the tab (\u0009), - // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web - // Services conversion compresses the passed inline session policy, managed policy - // ARNs, and session tags into a packed binary format that has a separate limit. - // Your request can fail for this limit even if your plaintext meets the other - // requirements. The PackedPolicySize response element indicates by percentage how - // close the policies and tags for your request are to the upper size limit. + // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM + // User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as managed session policies. The policies must exist in the same account as - // the role. This parameter is optional. You can provide up to 10 managed policy - // ARNs. However, the plaintext that you use for both inline and managed session - // policies can't exceed 2,048 characters. 
For more information about ARNs, see - // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. An Amazon Web Services conversion - // compresses the passed inline session policy, managed policy ARNs, and session - // tags into a packed binary format that has a separate limit. Your request can - // fail for this limit even if your plaintext meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. Passing policies to this - // operation returns new temporary credentials. The resulting session's permissions - // are the intersection of the role's identity-based policy and the session - // policies. You can use the role's temporary credentials in subsequent Amazon Web - // Services API calls to access resources in the account that owns the role. You - // cannot use session policies to grant more permissions than those allowed by the - // identity-based policy of the role that is being assumed. For more information, - // see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. + // the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the + // Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's + // identity-based policy and the session policies. You can use the role's temporary + // credentials in subsequent Amazon Web Services API calls to access resources in + // the account that owns the role. You cannot use session policies to grant more + // permissions than those allowed by the identity-based policy of the role that is + // being assumed. For more information, see [Session Policies]in the IAM User Guide. + // + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // The fully qualified host component of the domain name of the OAuth 2.0 identity // provider. Do not specify this value for an OpenID Connect identity provider. + // // Currently www.amazon.com and graph.facebook.com are the only supported identity // providers for OAuth 2.0 access tokens. Do not include URL schemes and port - // numbers. Do not specify this value for OpenID Connect ID tokens. + // numbers. + // + // Do not specify this value for OpenID Connect ID tokens. 
ProviderId *string noSmithyDocumentSerde } -// Contains the response to a successful AssumeRoleWithWebIdentity request, -// including temporary Amazon Web Services credentials that can be used to make -// Amazon Web Services requests. +// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type AssumeRoleWithWebIdentityOutput struct { // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers @@ -244,9 +300,10 @@ type AssumeRoleWithWebIdentityOutput struct { Audience *string // The temporary security credentials, which include an access key ID, a secret - // access key, and a security token. The size of the security token that STS API - // operations return is not fixed. We strongly recommend that you make no - // assumptions about the maximum size. + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // A percentage value that indicates the packed size of the session policies and @@ -255,30 +312,34 @@ type AssumeRoleWithWebIdentityOutput struct { // allowed space. PackedPolicySize *int32 - // The issuing authority of the web identity token presented. For OpenID Connect + // The issuing authority of the web identity token presented. For OpenID Connect // ID tokens, this contains the value of the iss field. For OAuth 2.0 access // tokens, this contains the value of the ProviderId parameter that was passed in // the AssumeRoleWithWebIdentity request. Provider *string // The value of the source identity that is returned in the JSON web token (JWT) - // from the identity provider. You can require users to set a source identity value - // when they assume a role. You do this by using the sts:SourceIdentity condition - // key in a role trust policy. That way, actions that are taken with the role are - // associated with that user. After the source identity is set, the value cannot be - // changed. It is present in the request for all actions that are taken by the role - // and persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) - // sessions. You can configure your identity provider to use an attribute + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with that + // user. After the source identity is set, the value cannot be changed. It is + // present in the request for all actions that are taken by the role and persists + // across [chained role]sessions. You can configure your identity provider to use an attribute // associated with your users, like user name or email, as the source identity when // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON - // web token. To learn more about OIDC tokens and claims, see Using Tokens with - // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) - // in the Amazon Cognito Developer Guide. 
For more information about using source - // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) - // in the IAM User Guide. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon + // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles] + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- + // + // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining + // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html SourceIdentity *string // The unique user identifier that is returned by the identity provider. This @@ -317,22 +378,22 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -347,13 +408,19 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go index 97a00b97da24..b1f14d28ce2c 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go @@ -6,34 +6,44 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Decodes additional information about the authorization status of a request from -// an encoded message returned in response to an Amazon Web Services request. For -// example, if a user is not authorized to perform an operation that he or she has -// requested, the request returns a Client.UnauthorizedOperation response (an HTTP -// 403 response). Some Amazon Web Services operations additionally return an -// encoded message that can provide details about this authorization failure. Only -// certain Amazon Web Services operations return an encoded authorization message. -// The documentation for an individual operation indicates whether that operation -// returns an encoded message in addition to returning an HTTP code. The message is -// encoded because the details of the authorization status can contain privileged -// information that the user who requested the operation should not see. To decode -// an authorization status message, a user must be granted permissions through an -// IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) -// to request the DecodeAuthorizationMessage ( sts:DecodeAuthorizationMessage ) -// action. The decoded message includes the following type of information: +// an encoded message returned in response to an Amazon Web Services request. +// +// For example, if a user is not authorized to perform an operation that he or she +// has requested, the request returns a Client.UnauthorizedOperation response (an +// HTTP 403 response). Some Amazon Web Services operations additionally return an +// encoded message that can provide details about this authorization failure. +// +// Only certain Amazon Web Services operations return an encoded authorization +// message. The documentation for an individual operation indicates whether that +// operation returns an encoded message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// contain privileged information that the user who requested the operation should +// not see. To decode an authorization status message, a user must be granted +// permissions through an IAM [policy]to request the DecodeAuthorizationMessage ( +// sts:DecodeAuthorizationMessage ) action. +// +// The decoded message includes the following type of information: +// // - Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether a -// Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. +// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User +// Guide. +// // - The principal who made the request. +// // - The requested action. +// // - The requested resource. +// // - The values of condition keys in the context of the user's request. 
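A short sketch of how the decode call documented here might be used, assuming the caller has been granted sts:DecodeAuthorizationMessage; the encoded message is a placeholder standing in for the value returned alongside an HTTP 403 response.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	// The caller needs sts:DecodeAuthorizationMessage permission for this call.
	out, err := client.DecodeAuthorizationMessage(ctx, &sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String("<encoded message from a Client.UnauthorizedOperation error>"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// DecodedMessage is a JSON document describing the denied request.
	fmt.Println(aws.ToString(out.DecodedMessage))
}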
+// +// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow +// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) { if params == nil { params = &DecodeAuthorizationMessageInput{} @@ -95,25 +105,25 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -128,13 +138,19 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go index e01fcebfe520..3ba00873db90 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go @@ -6,28 +6,35 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the account identifier for the specified access key ID. Access keys -// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE ) and -// a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). -// For more information about access keys, see Managing Access Keys for IAM Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) -// in the IAM User Guide. 
When you pass an access key ID to this operation, it -// returns the ID of the Amazon Web Services account to which the keys belong. -// Access key IDs beginning with AKIA are long-term credentials for an IAM user or -// the Amazon Web Services account root user. Access key IDs beginning with ASIA -// are temporary credentials that are created using STS operations. If the account -// in the response belongs to you, you can sign in as the root user and review your -// root user access keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) -// to learn which IAM user owns the keys. To learn who requested the temporary -// credentials for an ASIA access key, view the STS events in your CloudTrail logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) -// in the IAM User Guide. This operation does not indicate the state of the access -// key. The key might be active, inactive, or deleted. Active keys might not have -// permissions to perform an operation. Providing a deleted access key might return -// an error that the key doesn't exist. +// Returns the account identifier for the specified access key ID. +// +// Access keys consist of two parts: an access key ID (for example, +// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example, +// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access +// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide. +// +// When you pass an access key ID to this operation, it returns the ID of the +// Amazon Web Services account to which the keys belong. Access key IDs beginning +// with AKIA are long-term credentials for an IAM user or the Amazon Web Services +// account root user. Access key IDs beginning with ASIA are temporary credentials +// that are created using STS operations. If the account in the response belongs to +// you, you can sign in as the root user and review your root user access keys. +// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who +// requested the temporary credentials for an ASIA access key, view the STS events +// in your [CloudTrail logs]in the IAM User Guide. +// +// This operation does not indicate the state of the access key. The key might be +// active, inactive, or deleted. Active keys might not have permissions to perform +// an operation. Providing a deleted access key might return an error that the key +// doesn't exist. +// +// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html +// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html +// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) { if params == nil { params = &GetAccessKeyInfoInput{} @@ -45,9 +52,10 @@ func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoI type GetAccessKeyInfoInput struct { - // The identifier of an access key. This parameter allows (through its regex - // pattern) a string of characters that can consist of any upper- or lowercase - // letter or digit. + // The identifier of an access key. 
+ // + // This parameter allows (through its regex pattern) a string of characters that + // can consist of any upper- or lowercase letter or digit. // // This member is required. AccessKeyId *string @@ -88,25 +96,25 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -121,13 +129,19 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go index 80296940899e..abac49ad2f81 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go @@ -12,13 +12,15 @@ import ( ) // Returns details about the IAM user or role whose credentials are used to call -// the operation. No permissions are required to perform this operation. If an -// administrator attaches a policy to your identity that explicitly denies access -// to the sts:GetCallerIdentity action, you can still perform this operation. -// Permissions are not required because the same information is returned when -// access is denied. To view an example response, see I Am Not Authorized to -// Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) -// in the IAM User Guide. +// the operation. +// +// No permissions are required to perform this operation. If an administrator +// attaches a policy to your identity that explicitly denies access to the +// sts:GetCallerIdentity action, you can still perform this operation. Permissions +// are not required because the same information is returned when access is denied. 
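Before moving on, a minimal sketch of the GetAccessKeyInfo operation documented above, using the example key ID from its doc comment; only the owning account ID is returned:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// The AKIA... value is the documentation example key ID, not a real credential.
	out, err := sts.NewFromConfig(cfg).GetAccessKeyInfo(ctx, &sts.GetAccessKeyInfoInput{
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The response carries only the owning account ID; the key's active,
	// inactive, or deleted state is not reported.
	fmt.Println(aws.ToString(out.Account))
}
```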
+// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide. +// +// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) { if params == nil { params = &GetCallerIdentityInput{} @@ -38,8 +40,8 @@ type GetCallerIdentityInput struct { noSmithyDocumentSerde } -// Contains the response to a successful GetCallerIdentity request, including -// information about the entity making the request. +// Contains the response to a successful GetCallerIdentity request, including information about the +// entity making the request. type GetCallerIdentityOutput struct { // The Amazon Web Services account ID number of the account that owns or contains @@ -51,8 +53,10 @@ type GetCallerIdentityOutput struct { // The unique identifier of the calling entity. The exact value depends on the // type of entity that is making the call. The values returned are those listed in - // the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) - // found on the Policy Variables reference page in the IAM User Guide. + // the aws:userid column in the [Principal table]found on the Policy Variables reference page in + // the IAM User Guide. + // + // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable UserId *string // Metadata pertaining to the operation's result. @@ -83,25 +87,25 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -116,10 +120,16 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != 
nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go index efaba119c92a..2bae67429f21 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go @@ -6,7 +6,6 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/sts/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -15,74 +14,100 @@ import ( // Returns a set of temporary security credentials (consisting of an access key // ID, a secret access key, and a security token) for a user. A typical use is in a // proxy application that gets temporary security credentials on behalf of -// distributed applications inside a corporate network. You must call the -// GetFederationToken operation using the long-term security credentials of an IAM -// user. As a result, this call is appropriate in contexts where those credentials -// can be safeguarded, usually in a server-based application. For a comparison of -// GetFederationToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. Although it is possible to call GetFederationToken using -// the security credentials of an Amazon Web Services account root user rather than -// an IAM user that you create for the purpose of a proxy application, we do not -// recommend it. For more information, see Safeguard your root user credentials -// and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. Session duration The temporary credentials are valid for -// the specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600 -// seconds (36 hours). The default session duration is 43,200 seconds (12 hours). -// Temporary credentials obtained by using the root user credentials have a maximum -// duration of 3,600 seconds (1 hour). Permissions You can use the temporary -// credentials created by GetFederationToken in any Amazon Web Services service -// with the following exceptions: +// distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based +// application. 
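As a quick aside on the GetCallerIdentity operation documented in the preceding file: it requires no permissions and takes no input fields, so a minimal sketch looks like the following (names illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	out, err := sts.NewFromConfig(cfg).GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	// Account, Arn, and UserId identify the caller; UserId follows the aws:userid
	// column of the Principal table referenced in the doc comment.
	fmt.Println(aws.ToString(out.Account), aws.ToString(out.Arn), aws.ToString(out.UserId))
}
```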
For a comparison of GetFederationToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// Although it is possible to call GetFederationToken using the security +// credentials of an Amazon Web Services account root user rather than an IAM user +// that you create for the purpose of a proxy application, we do not recommend it. +// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// # Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by +// using the root user credentials have a maximum duration of 3,600 seconds (1 +// hour). +// +// # Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service with the following exceptions: +// // - You cannot call any IAM operations using the CLI or the Amazon Web Services // API. This limitation does not apply to console sessions. +// // - You cannot call any STS operations except GetCallerIdentity . // -// You can use temporary credentials for single sign-on (SSO) to the console. You -// must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policy Amazon -// Resource Names (ARNs) to use as managed session policies. The plaintext that you -// use for both inline and managed session policies can't exceed 2,048 characters. +// You can use temporary credentials for single sign-on (SSO) to the console. +// +// You must pass an inline or managed [session policy] to this operation. You can pass a single +// JSON policy document to use as an inline session policy. You can also specify up +// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session +// policies. The plaintext that you use for both inline and managed session +// policies can't exceed 2,048 characters. +// // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. When you pass // session policies, the session permissions are the intersection of the IAM user // policies and the session policies that you pass. This gives you a way to further // restrict the permissions for a federated user. You cannot use session policies // to grant more permissions than those that are defined in the permissions policy -// of the IAM user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. 
For information about using GetFederationToken to create -// temporary security credentials, see GetFederationToken—Federation Through a -// Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken) -// . You can use the credentials to access a resource that has a resource-based +// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For +// information about using GetFederationToken to create temporary security +// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker]. +// +// You can use the credentials to access a resource that has a resource-based // policy. If that policy specifically references the federated user session in the // Principal element of the policy, the session has the permissions allowed by the // policy. These permissions are granted in addition to the permissions granted by -// the session policies. Tags (Optional) You can pass tag key-value pairs to your -// session. These are called session tags. For more information about session tags, -// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You can create a mobile-based or browser-based app that -// can authenticate users using a web identity provider like Login with Amazon, -// Facebook, Google, or an OpenID Connect-compatible identity provider. In this -// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/) -// or AssumeRoleWithWebIdentity . For more information, see Federation Through a -// Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) -// in the IAM User Guide. An administrator must grant you the permissions necessary -// to pass session tags. The administrator can also create granular permissions to -// allow you to pass only specific session tags. For more information, see -// Tutorial: Using Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) -// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is -// preserved. This means that you cannot have separate Department and department -// tag keys. Assume that the user that you are federating has the Department = -// Marketing tag and you pass the department = engineering session tag. Department -// and department are not saved as separate tags, and the session tag passed in -// the request takes precedence over the user tag. +// the session policies. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User +// Guide. +// +// You can create a mobile-based or browser-based app that can authenticate users +// using a web identity provider like Login with Amazon, Facebook, Google, or an +// OpenID Connect-compatible identity provider. In this case, we recommend that you +// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User +// Guide. +// +// An administrator must grant you the permissions necessary to pass session tags. +// The administrator can also create granular permissions to allow you to pass only +// specific session tags. 
For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This means +// that you cannot have separate Department and department tag keys. Assume that +// the user that you are federating has the Department = Marketing tag and you +// pass the department = engineering session tag. Department and department are +// not saved as separate tags, and the session tag passed in the request takes +// precedence over the user tag. +// +// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity +// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Amazon Cognito]: http://aws.amazon.com/cognito/ +// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html +// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) { if params == nil { params = &GetFederationTokenInput{} @@ -103,10 +128,11 @@ type GetFederationTokenInput struct { // The name of the federated user. The name is used as an identifier for the // temporary security credentials (such as Bob ). For example, you can reference // the federated user name in a resource-based policy, such as in an Amazon S3 - // bucket policy. The regex used to validate this parameter is a string of - // characters consisting of upper- and lower-case alphanumeric characters with no - // spaces. You can also include underscores or any of the following characters: - // =,.@- + // bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can also + // include underscores or any of the following characters: =,.@- // // This member is required. Name *string @@ -120,99 +146,127 @@ type GetFederationTokenInput struct { DurationSeconds *int32 // An IAM policy in JSON format that you want to use as an inline session policy. - // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. This parameter is - // optional. 
However, if you do not pass any session policies, then the resulting - // federated user session has no permissions. When you pass session policies, the - // session permissions are the intersection of the IAM user policies and the - // session policies that you pass. This gives you a way to further restrict the - // permissions for a federated user. You cannot use session policies to grant more - // permissions than those that are defined in the permissions policy of the IAM - // user. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // // The plaintext that you use for both inline and managed session policies can't // exceed 2,048 characters. The JSON policy characters can be any ASCII character // from the space character to the end of the valid character list (\u0020 through // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage - // return (\u000D) characters. An Amazon Web Services conversion compresses the - // passed inline session policy, managed policy ARNs, and session tags into a - // packed binary format that has a separate limit. Your request can fail for this - // limit even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. 
The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session Policy *string // The Amazon Resource Names (ARNs) of the IAM managed policies that you want to // use as a managed session policy. The policies must exist in the same account as - // the IAM user that is requesting federated access. You must pass an inline or - // managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policy Amazon - // Resource Names (ARNs) to use as managed session policies. The plaintext that you - // use for both inline and managed session policies can't exceed 2,048 characters. - // You can provide up to 10 managed policy ARNs. For more information about ARNs, - // see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. This parameter is optional. - // However, if you do not pass any session policies, then the resulting federated - // user session has no permissions. When you pass session policies, the session - // permissions are the intersection of the IAM user policies and the session - // policies that you pass. This gives you a way to further restrict the permissions - // for a federated user. You cannot use session policies to grant more permissions - // than those that are defined in the permissions policy of the IAM user. For more - // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) - // in the IAM User Guide. The resulting credentials can be used to access a - // resource that has a resource-based policy. If that policy specifically - // references the federated user session in the Principal element of the policy, - // the session has the permissions allowed by the policy. These permissions are - // granted in addition to the permissions that are granted by the session policies. + // the IAM user that is requesting federated access. + // + // You must pass an inline or managed [session policy] to this operation. You can pass a single + // JSON policy document to use as an inline session policy. You can also specify up + // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session + // policies. The plaintext that you use for both inline and managed session + // policies can't exceed 2,048 characters. You can provide up to 10 managed policy + // ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General + // Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection of + // the IAM user policies and the session policies that you pass. This gives you a + // way to further restrict the permissions for a federated user. 
You cannot use + // session policies to grant more permissions than those that are defined in the + // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User + // Guide. + // + // The resulting credentials can be used to access a resource that has a + // resource-based policy. If that policy specifically references the federated user + // session in the Principal element of the policy, the session has the permissions + // allowed by the policy. These permissions are granted in addition to the + // permissions that are granted by the session policies. + // // An Amazon Web Services conversion compresses the passed inline session policy, // managed policy ARNs, and session tags into a packed binary format that has a // separate limit. Your request can fail for this limit even if your plaintext // meets the other requirements. The PackedPolicySize response element indicates // by percentage how close the policies and tags for your request are to the upper // size limit. + // + // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html PolicyArns []types.PolicyDescriptorType // A list of session tags. Each session tag consists of a key name and an - // associated value. For more information about session tags, see Passing Session - // Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) - // in the IAM User Guide. This parameter is optional. You can pass up to 50 session - // tags. The plaintext session tag keys can’t exceed 128 characters and the values - // can’t exceed 256 characters. For these and additional limits, see IAM and STS - // Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. An Amazon Web Services conversion compresses the passed - // inline session policy, managed policy ARNs, and session tags into a packed - // binary format that has a separate limit. Your request can fail for this limit - // even if your plaintext meets the other requirements. The PackedPolicySize - // response element indicates by percentage how close the policies and tags for - // your request are to the upper size limit. You can pass a session tag with the - // same key as a tag that is already attached to the user you are federating. When - // you do, session tags override a user tag with the same key. Tag key–value pairs - // are not case sensitive, but case is preserved. This means that you cannot have - // separate Department and department tag keys. Assume that the role has the - // Department = Marketing tag and you pass the department = engineering session - // tag. Department and department are not saved as separate tags, and the session - // tag passed in the request takes precedence over the role tag. + // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User + // Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed 256 + // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide. 
+ // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has a + // separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the upper + // size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user tag + // with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This means + // that you cannot have separate Department and department tag keys. Assume that + // the role has the Department = Marketing tag and you pass the department = + // engineering session tag. Department and department are not saved as separate + // tags, and the session tag passed in the request takes precedence over the role + // tag. + // + // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length Tags []types.Tag noSmithyDocumentSerde } -// Contains the response to a successful GetFederationToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful GetFederationToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type GetFederationTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. 
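A hedged sketch of a GetFederationToken call shaped by the doc comments above: long-term IAM user credentials, an explicit session policy ARN so the session is not permissionless, and an optional session tag. The policy ARN, user name, and tag values are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
	"github.com/aws/aws-sdk-go-v2/service/sts/types"
)

func main() {
	ctx := context.Background()
	// Must resolve to the long-term credentials of an IAM user, not temporary credentials.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)

	out, err := client.GetFederationToken(ctx, &sts.GetFederationTokenInput{
		Name:            aws.String("Bob"), // identifier for the temporary credentials
		DurationSeconds: aws.Int32(3600),   // 900 s up to 129,600 s; default is 43,200 s
		// Without any session policy the federated session has no permissions;
		// this managed policy ARN is a placeholder.
		PolicyArns: []types.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
		},
		// Optional session tags; keys preserve case but are not case sensitive.
		Tags: []types.Tag{
			{Key: aws.String("Department"), Value: aws.String("Engineering")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(out.Credentials.AccessKeyId), out.Credentials.Expiration)
}
```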
Credentials *types.Credentials // Identifiers for the federated user associated with the credentials (such as @@ -255,25 +309,25 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -288,13 +342,19 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index 7b07435f2252..c73316a3c04c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -6,7 +6,6 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" - "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/sts/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -16,43 +15,58 @@ import ( // IAM user. The credentials consist of an access key ID, a secret access key, and // a security token. Typically, you use GetSessionToken if you want to use MFA to // protect programmatic calls to specific Amazon Web Services API operations like -// Amazon EC2 StopInstances . MFA-enabled IAM users must call GetSessionToken and -// submit an MFA code that is associated with their MFA device. Using the temporary -// security credentials that the call returns, IAM users can then make programmatic -// calls to API operations that require MFA authentication. An incorrect MFA code -// causes the API to return an access denied error. 
For a comparison of -// GetSessionToken with the other API operations that produce temporary -// credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) -// in the IAM User Guide. No permissions are required for users to perform this -// operation. The purpose of the sts:GetSessionToken operation is to authenticate -// the user using MFA. You cannot use policies to control authentication -// operations. For more information, see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) -// in the IAM User Guide. Session Duration The GetSessionToken operation must be -// called by using the long-term Amazon Web Services security credentials of an IAM -// user. Credentials that are created by IAM users are valid for the duration that -// you specify. This duration can range from 900 seconds (15 minutes) up to a -// maximum of 129,600 seconds (36 hours), with a default of 43,200 seconds (12 -// hours). Credentials based on account credentials can range from 900 seconds (15 -// minutes) up to 3,600 seconds (1 hour), with a default of 1 hour. Permissions The -// temporary security credentials created by GetSessionToken can be used to make -// API calls to any Amazon Web Services service with the following exceptions: +// Amazon EC2 StopInstances . +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is +// associated with their MFA device. Using the temporary security credentials that +// the call returns, IAM users can then make programmatic calls to API operations +// that require MFA authentication. An incorrect MFA code causes the API to return +// an access denied error. For a comparison of GetSessionToken with the other API +// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Comparing the Amazon Web Services STS API operations] in the IAM User Guide. +// +// No permissions are required for users to perform this operation. The purpose of +// the sts:GetSessionToken operation is to authenticate the user using MFA. You +// cannot use policies to control authentication operations. For more information, +// see [Permissions for GetSessionToken]in the IAM User Guide. +// +// # Session Duration +// +// The GetSessionToken operation must be called by using the long-term Amazon Web +// Services security credentials of an IAM user. Credentials that are created by +// IAM users are valid for the duration that you specify. This duration can range +// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours), +// with a default of 43,200 seconds (12 hours). Credentials based on account +// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1 +// hour), with a default of 1 hour. +// +// # Permissions +// +// The temporary security credentials created by GetSessionToken can be used to +// make API calls to any Amazon Web Services service with the following exceptions: +// // - You cannot call any IAM API operations unless MFA authentication // information is included in the request. +// // - You cannot call any STS API except AssumeRole or GetCallerIdentity . 
// // The credentials that GetSessionToken returns are based on permissions // associated with the IAM user whose credentials were used to call the operation. -// The temporary credentials have the same permissions as the IAM user. Although it -// is possible to call GetSessionToken using the security credentials of an Amazon -// Web Services account root user rather than an IAM user, we do not recommend it. -// If GetSessionToken is called using root user credentials, the temporary -// credentials have root user permissions. For more information, see Safeguard -// your root user credentials and don't use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) -// in the IAM User Guide For more information about using GetSessionToken to -// create temporary credentials, see Temporary Credentials for Users in Untrusted -// Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) -// in the IAM User Guide. +// The temporary credentials have the same permissions as the IAM user. +// +// Although it is possible to call GetSessionToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user, we do not +// recommend it. If GetSessionToken is called using root user credentials, the +// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in +// the IAM User Guide +// +// For more information about using GetSessionToken to create temporary +// credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide. +// +// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html +// [Comparing the Amazon Web Services STS API operations]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison +// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken +// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials +// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) { if params == nil { params = &GetSessionTokenInput{} @@ -84,10 +98,11 @@ type GetSessionTokenInput struct { // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You // can find the device for an IAM user by going to the Amazon Web Services - // Management Console and viewing the user's security credentials. The regex used - // to validate this parameter is a string of characters consisting of upper- and - // lower-case alphanumeric characters with no spaces. You can also include - // underscores or any of the following characters: =,.@:/- + // Management Console and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can also + // include underscores or any of the following characters: =,.@:/- SerialNumber *string // The value provided by the MFA device, if MFA is required. If any policy @@ -95,22 +110,24 @@ type GetSessionTokenInput struct { // authentication is required, the user must provide a code when requesting a set // of temporary security credentials. A user who fails to provide the code receives // an "access denied" response when requesting resources that require MFA - // authentication. The format for this parameter, as described by its regex - // pattern, is a sequence of six numeric digits. + // authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. TokenCode *string noSmithyDocumentSerde } -// Contains the response to a successful GetSessionToken request, including -// temporary Amazon Web Services credentials that can be used to make Amazon Web -// Services requests. +// Contains the response to a successful GetSessionToken request, including temporary Amazon Web +// Services credentials that can be used to make Amazon Web Services requests. type GetSessionTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret - // access key, and a security (or session) token. The size of the security token - // that STS API operations return is not fixed. We strongly recommend that you make - // no assumptions about the maximum size. + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. Credentials *types.Credentials // Metadata pertaining to the operation's result. @@ -141,25 +158,25 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addSetLoggerMiddleware(stack, options); err != nil { return err } - if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + if err = addClientRequestID(stack); err != nil { return err } - if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + if err = addComputeContentLength(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } - if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + if err = addComputePayloadSHA256(stack); err != nil { return err } - if err = addRetryMiddlewares(stack, options); err != nil { + if err = addRetry(stack, options); err != nil { return err } - if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + if err = addRawResponseToMetadata(stack); err != nil { return err } - if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + if err = addRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { @@ -174,10 +191,16 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil { return err } - if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + if err = addRecursionDetection(stack); err != nil { return err } if err = 
addRequestIDRetrieverMiddleware(stack); err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go index 9db5bfd43482..e842a7f7e8ef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go @@ -12,7 +12,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } @@ -90,12 +90,12 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthParamsRegion(params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -157,7 +157,7 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go index 5d634ce35c8f..7e4346ec9fa1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go @@ -20,8 +20,17 @@ import ( "io" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsAwsquery_deserializeOpAssumeRole struct { } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go index d963fd8d19ac..cbb19c7f6686 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go @@ -3,9 +3,11 @@ // Package sts provides the API client, operations, and parameter types for AWS // Security Token Service. // -// Security Token Service Security Token Service (STS) enables you to request -// temporary, limited-privilege credentials for users. This guide provides -// descriptions of the STS API. For more information about using this service, see -// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) -// . +// # Security Token Service +// +// Security Token Service (STS) enables you to request temporary, +// limited-privilege credentials for users. This guide provides descriptions of the +// STS API. For more information about using this service, see [Temporary Security Credentials]. 
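Looking back at the GetSessionToken operation covered a few hunks above, a minimal MFA sketch within the documented duration limits; the serial number is the example ARN from the doc comment and the token code is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	// Long-term IAM user credentials; root user credentials are discouraged here.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	out, err := sts.NewFromConfig(cfg).GetSessionToken(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(43200),                                 // 900 s to 129,600 s for IAM users
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // virtual MFA device ARN
		TokenCode:       aws.String("123456"),                             // six-digit code from the device
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.ToString(out.Credentials.SessionToken))
}
```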
+// +// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html package sts diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go index 9f7932f9a067..35305d8976fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -217,6 +217,13 @@ func resolveBaseEndpoint(cfg aws.Config, o *Options) { } } +func bindRegion(region string) *string { + if region == "" { + return nil + } + return aws.String(endpoints.MapFIPSRegion(region)) +} + // EndpointParameters provides the parameters that influence how endpoints are // resolved. type EndpointParameters struct { @@ -299,6 +306,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -1038,10 +1056,10 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} - params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.Region = bindRegion(options.Region) params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) params.Endpoint = options.BaseEndpoint @@ -1068,6 +1086,10 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return next.HandleFinalize(ctx, in) } + if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil { + return out, metadata, fmt.Errorf("invalid accountID set: %w", err) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -1077,7 +1099,7 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index d90b8bce4b41..6b6e839e6c97 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -5,8 +5,7 @@ "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4" + "github.com/aws/smithy-go": "v1.4.0" }, "files": [ 
"api_client.go", @@ -31,6 +30,7 @@ "options.go", "protocol_test.go", "serializers.go", + "snapshot_test.go", "types/errors.go", "types/types.go", "validators.go" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 962c336cf91e..84e221f2b96d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.7" +const goModuleVersion = "1.30.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go index 5c1be79f8c07..a9a35881affe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go @@ -24,6 +24,9 @@ type Options struct { // modify this list for per operation behavior. APIOptions []func(*middleware.Stack) error + // Indicates how aws account ID is applied in endpoint2.0 routing + AccountIDEndpointMode aws.AccountIDEndpointMode + // The optional application specific identifier appended to the User-Agent header. AppID string @@ -50,8 +53,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -70,17 +75,20 @@ type Options struct { // RetryMaxAttempts specifies the maximum number attempts an API client will call // an operation that fails with a retryable error. A value of 0 is ignored, and // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. If specified in an operation call's - // functional options with a value that is different than the constructed client's - // Options, the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. + // per operation call's retry max attempts. + // + // If specified in an operation call's functional options with a value that is + // different than the constructed client's Options, the Client's Retryer will be + // wrapped to use the operation's specific RetryMaxAttempts value. RetryMaxAttempts int // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. + // Retryer option is not also specified. + // + // When creating a new API Clients this member will only be used if the Retryer + // Options member is nil. This value will be ignored if Retryer is not nil. + // + // Currently does not support per operation call overrides, may in the future. 
RetryMode aws.RetryMode // Retryer guides how HTTP requests should be retried in case of recoverable @@ -97,8 +105,9 @@ type Options struct { // The initial DefaultsMode used when the client options were constructed. If the // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. + // value was at that point in time. + // + // Currently does not support per operation call overrides, may in the future. resolvedDefaultsMode aws.DefaultsMode // The HTTP client to invoke API calls with. Defaults to client's default HTTP @@ -143,6 +152,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for // this field will likely prevent you from using any endpoint-related service // features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// // To migrate an EndpointResolver implementation that uses a custom endpoint, set // the client option BaseEndpoint instead. func WithEndpointResolver(v EndpointResolver) func(*Options) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go index 097875b279b3..9573a4b6461f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go @@ -65,9 +65,10 @@ func (e *IDPCommunicationErrorException) ErrorCode() string { func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } // The identity provider (IdP) reported that authentication failed. This might be -// because the claim is invalid. If this error is returned for the -// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired -// or has been explicitly revoked. +// because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it can +// also mean that the claim has expired or has been explicitly revoked. type IDPRejectedClaimException struct { Message *string @@ -183,11 +184,13 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu // compresses the session policy document, session policy ARNs, and session tags // into a packed binary format that has a separate limit. The error message // indicates by percentage how close the policies and tags are to the upper size -// limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. You could receive this error even though you meet other -// defined session policy and session tag limits. For more information, see IAM -// and STS Entity Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. +// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide. 
+// +// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html +// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length type PackedPolicyTooLargeException struct { Message *string @@ -215,9 +218,10 @@ func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// console to activate STS in that region. For more information, see [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]in the IAM +// User Guide. +// +// [Activating and Deactivating Amazon Web Services STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html type RegionDisabledException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go index e3701d11d153..dff7a3c2e769 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go @@ -11,10 +11,11 @@ import ( // returns. type AssumedRoleUser struct { - // The ARN of the temporary security credentials that are returned from the - // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. + // The ARN of the temporary security credentials that are returned from the AssumeRole + // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in + // the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html // // This member is required. Arn *string @@ -61,8 +62,9 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the // credentials. For more information about ARNs and how to use them in policies, - // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in the IAM User Guide. + // see [IAM Identifiers]in the IAM User Guide. + // + // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html // // This member is required. Arn *string @@ -81,9 +83,10 @@ type FederatedUser struct { type PolicyDescriptorType struct { // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session - // policy for the role. For more information about ARNs, see Amazon Resource Names - // (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. + // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web + // Services General Reference. 
+ // + // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html Arn *string noSmithyDocumentSerde @@ -107,23 +110,30 @@ type ProvidedContext struct { // You can pass custom key-value pair attributes when you assume a role or // federate a user. These are called session tags. You can then use the session -// tags to control access to resources. For more information, see Tagging Amazon -// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. +// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User +// Guide. +// +// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html type Tag struct { - // The key for a session tag. You can pass up to 50 session tags. The plain text - // session tag keys can’t exceed 128 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // // This member is required. Key *string - // The value for a session tag. You can pass up to 50 session tags. The plain text - // session tag values can’t exceed 256 characters. For these and additional limits, - // see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) - // in the IAM User Guide. + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User + // Guide. + // + // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length // // This member is required. Value *string diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore index c92d6105eb32..2518b3491549 100644 --- a/vendor/github.com/aws/smithy-go/.gitignore +++ b/vendor/github.com/aws/smithy-go/.gitignore @@ -24,3 +24,6 @@ build/ # VS Code bin/ .vscode/ + +# make +c.out diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 46b115083d18..bdbc7b43650c 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,26 @@ +# Release (2024-06-27) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.3 + * **Bug Fix**: Fix encoding/cbor test overflow on x86. + +# Release (2024-03-29) + +* No change notes available for this release. + +# Release (2024-02-21) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.1 + * **Bug Fix**: Remove runtime dependency on go-cmp. + +# Release (2024-02-13) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.20.0 + * **Feature**: Add codegen definition for sigv4a trait. 
+ * **Feature**: Bump minimum Go version to 1.20 per our language support policy. + # Release (2023-12-07) ## Module Highlights diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile index 4b3c209373c4..e66fa8caceb1 100644 --- a/vendor/github.com/aws/smithy-go/Makefile +++ b/vendor/github.com/aws/smithy-go/Makefile @@ -33,13 +33,18 @@ smithy-clean: ################## # Linting/Verify # ################## -.PHONY: verify vet +.PHONY: verify vet cover verify: vet vet: go vet ${BUILD_TAGS} --all ./... +cover: + go test ${BUILD_TAGS} -coverprofile c.out ./... + @cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \ + echo "total (statements): $$cover%"; + ################ # Unit Testing # ################ diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index cd6f7fa45c40..f82b767255ae 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.19.0" +const goModuleVersion = "1.20.3" diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml index 20295cdd2aa4..9d94b7cbd0a2 100644 --- a/vendor/github.com/aws/smithy-go/modman.toml +++ b/vendor/github.com/aws/smithy-go/modman.toml @@ -1,5 +1,4 @@ [dependencies] - "github.com/google/go-cmp" = "v0.5.8" "github.com/jmespath/go-jmespath" = "v0.4.0" [modules] diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE deleted file mode 100644 index 584149b6ee28..000000000000 --- a/vendor/github.com/containerd/containerd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE deleted file mode 100644 index 8915f02773f5..000000000000 --- a/vendor/github.com/containerd/containerd/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go deleted file mode 100644 index 23ddfab1a6bf..000000000000 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ /dev/null @@ -1,323 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package compression - -import ( - "bufio" - "bytes" - "compress/gzip" - "context" - "encoding/binary" - "fmt" - "io" - "os" - "os/exec" - "strconv" - "sync" - - "github.com/containerd/log" - "github.com/klauspost/compress/zstd" -) - -type ( - // Compression is the state represents if compressed or not. - Compression int -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Gzip is gzip compression algorithm. - Gzip - // Zstd is zstd compression algorithm. - Zstd -) - -const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" - -var ( - initPigz sync.Once - unpigzPath string -) - -var ( - bufioReader32KPool = &sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, - } -) - -// DecompressReadCloser include the stream after decompress and the compress method detected. -type DecompressReadCloser interface { - io.ReadCloser - // GetCompression returns the compress method which is used before decompressing - GetCompression() Compression -} - -type readCloserWrapper struct { - io.Reader - compression Compression - closer func() error -} - -func (r *readCloserWrapper) Close() error { - if r.closer != nil { - return r.closer() - } - return nil -} - -func (r *readCloserWrapper) GetCompression() Compression { - return r.compression -} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (w *writeCloserWrapper) Close() error { - if w.closer != nil { - w.closer() - } - return nil -} - -type bufferedReader struct { - buf *bufio.Reader -} - -func newBufferedReader(r io.Reader) *bufferedReader { - buf := bufioReader32KPool.Get().(*bufio.Reader) - buf.Reset(r) - return &bufferedReader{buf} -} - -func (r *bufferedReader) Read(p []byte) (n int, err error) { - if r.buf == nil { - return 0, io.EOF - } - n, err = r.buf.Read(p) - if err == io.EOF { - r.buf.Reset(nil) - bufioReader32KPool.Put(r.buf) - r.buf = nil - } - return -} - -func (r *bufferedReader) Peek(n int) ([]byte, error) { - if r.buf == nil { - return nil, io.EOF - } - return r.buf.Peek(n) -} - -const ( - zstdMagicSkippableStart = 0x184D2A50 - zstdMagicSkippableMask = 0xFFFFFFF0 -) - -var ( - gzipMagic = []byte{0x1F, 0x8B, 0x08} - zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} -) - -type matcher = func([]byte) bool - -func magicNumberMatcher(m []byte) matcher { - return func(source []byte) bool { - return bytes.HasPrefix(source, m) - } -} - -// zstdMatcher detects zstd compression algorithm. -// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. -// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details. -func zstdMatcher() matcher { - return func(source []byte) bool { - if bytes.HasPrefix(source, zstdMagic) { - // Zstandard frame - return true - } - // skippable frame - if len(source) < 8 { - return false - } - // magic number from 0x184D2A50 to 0x184D2A5F. - if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart { - return true - } - return false - } -} - -// DetectCompression detects the compression algorithm of the source. 
-func DetectCompression(source []byte) Compression { - for compression, fn := range map[Compression]matcher{ - Gzip: magicNumberMatcher(gzipMagic), - Zstd: zstdMatcher(), - } { - if fn(source) { - return compression - } - } - return Uncompressed -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { - buf := newBufferedReader(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. - // See Issue docker/docker#18170 - return nil, err - } - - switch compression := DetectCompression(bs); compression { - case Uncompressed: - return &readCloserWrapper{ - Reader: buf, - compression: compression, - }, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - gzReader, err := gzipDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - - return &readCloserWrapper{ - Reader: gzReader, - compression: compression, - closer: func() error { - cancel() - return gzReader.Close() - }, - }, nil - case Zstd: - zstdReader, err := zstd.NewReader(buf) - if err != nil { - return nil, err - } - return &readCloserWrapper{ - Reader: zstdReader, - compression: compression, - closer: func() error { - zstdReader.Close() - return nil - }, - }, nil - - default: - return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - switch compression { - case Uncompressed: - return &writeCloserWrapper{dest, nil}, nil - case Gzip: - return gzip.NewWriter(dest), nil - case Zstd: - return zstd.NewWriter(dest) - default: - return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) - } -} - -// Extension returns the extension of a file that uses the specified compression algorithm. 
-func (compression *Compression) Extension() string { - switch *compression { - case Gzip: - return "gz" - case Zstd: - return "zst" - } - return "" -} - -func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - initPigz.Do(func() { - if unpigzPath = detectPigz(); unpigzPath != "" { - log.L.Debug("using pigz for decompression") - } - }) - - if unpigzPath == "" { - return gzip.NewReader(buf) - } - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { - reader, writer := io.Pipe() - - cmd.Stdin = in - cmd.Stdout = writer - - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - if err := cmd.Start(); err != nil { - return nil, err - } - - go func() { - if err := cmd.Wait(); err != nil { - writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - writer.Close() - } - }() - - return reader, nil -} - -func detectPigz() string { - path, err := exec.LookPath("unpigz") - if err != nil { - log.L.WithError(err).Debug("unpigz not found, falling back to go gzip") - return "" - } - - // Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable - value := os.Getenv(disablePigzEnv) - if value == "" { - return path - } - - disable, err := strconv.ParseBool(value) - if err != nil { - log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value) - return path - } - - if disable { - return "" - } - - return path -} diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go deleted file mode 100644 index 3516494ac0bd..000000000000 --- a/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build gofuzz - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package compression - -import ( - "bytes" -) - -func FuzzDecompressStream(data []byte) int { - _, _ = DecompressStream(bytes.NewReader(data)) - return 1 -} diff --git a/vendor/github.com/containerd/containerd/archive/link_default.go b/vendor/github.com/containerd/containerd/archive/link_default.go deleted file mode 100644 index 84a8997eb510..000000000000 --- a/vendor/github.com/containerd/containerd/archive/link_default.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !freebsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package archive - -import "os" - -func link(oldname, newname string) error { - return os.Link(oldname, newname) -} diff --git a/vendor/github.com/containerd/containerd/archive/link_freebsd.go b/vendor/github.com/containerd/containerd/archive/link_freebsd.go deleted file mode 100644 index 2097db06bc7d..000000000000 --- a/vendor/github.com/containerd/containerd/archive/link_freebsd.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -func link(oldname, newname string) error { - e := ignoringEINTR(func() error { - return unix.Linkat(unix.AT_FDCWD, oldname, unix.AT_FDCWD, newname, 0) - }) - if e != nil { - return &os.LinkError{Op: "link", Old: oldname, New: newname, Err: e} - } - return nil -} - -// The following contents were copied from Go 1.18.2. -// Use of this source code is governed by the following -// BSD-style license: -// -// Copyright (c) 2009 The Go Authors. All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// ignoringEINTR makes a function call and repeats it if it returns an -// EINTR error. This appears to be required even though we install all -// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846. -// Also #20400 and #36644 are issues in which a signal handler is -// installed without setting SA_RESTART. None of these are the common case, -// but there are enough of them that it seems that we can't avoid -// an EINTR loop. 
-func ignoringEINTR(fn func() error) error { - for { - err := fn() - if err != syscall.EINTR { - return err - } - } -} diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go deleted file mode 100644 index c61f89ec8d8a..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ /dev/null @@ -1,823 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "archive/tar" - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "syscall" - "time" - - "github.com/moby/sys/userns" - - "github.com/containerd/containerd/archive/tarheader" - "github.com/containerd/containerd/pkg/epoch" - "github.com/containerd/continuity/fs" - "github.com/containerd/log" -) - -var bufPool = &sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 32*1024) - return &buffer - }, -} - -var errInvalidArchive = errors.New("invalid archive") - -// Diff returns a tar stream of the computed filesystem -// difference between the provided directories. -// -// Produces a tar using OCI style file markers for deletions. Deleted -// files will be prepended with the prefix ".wh.". This style is -// based off AUFS whiteouts. -// See https://github.com/opencontainers/image-spec/blob/main/layer.md -func Diff(ctx context.Context, a, b string, opts ...WriteDiffOpt) io.ReadCloser { - r, w := io.Pipe() - - go func() { - err := WriteDiff(ctx, w, a, b, opts...) - if err != nil { - log.G(ctx).WithError(err).Debugf("write diff failed") - } - if err = w.CloseWithError(err); err != nil { - log.G(ctx).WithError(err).Debugf("closing tar pipe failed") - } - }() - - return r -} - -// WriteDiff writes a tar stream of the computed difference between the -// provided paths. -// -// Produces a tar using OCI style file markers for deletions. Deleted -// files will be prepended with the prefix ".wh.". This style is -// based off AUFS whiteouts. -// See https://github.com/opencontainers/image-spec/blob/main/layer.md -func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffOpt) error { - var options WriteDiffOptions - for _, opt := range opts { - if err := opt(&options); err != nil { - return fmt.Errorf("failed to apply option: %w", err) - } - } - if tm := epoch.FromContext(ctx); tm != nil && options.SourceDateEpoch == nil { - options.SourceDateEpoch = tm - } - - if options.writeDiffFunc == nil { - options.writeDiffFunc = writeDiffNaive - } - - return options.writeDiffFunc(ctx, w, a, b, options) -} - -// writeDiffNaive writes a tar stream of the computed difference between the -// provided directories on disk. -// -// Produces a tar using OCI style file markers for deletions. Deleted -// files will be prepended with the prefix ".wh.". This style is -// based off AUFS whiteouts. 
-// See https://github.com/opencontainers/image-spec/blob/main/layer.md -func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, o WriteDiffOptions) error { - var opts []ChangeWriterOpt - if o.SourceDateEpoch != nil { - opts = append(opts, - WithModTimeUpperBound(*o.SourceDateEpoch), - WithWhiteoutTime(*o.SourceDateEpoch)) - } - cw := NewChangeWriter(w, b, opts...) - err := fs.Changes(ctx, a, b, cw.HandleChange) - if err != nil { - return fmt.Errorf("failed to create diff tar stream: %w", err) - } - return cw.Close() -} - -const ( - // whiteoutPrefix prefix means file is a whiteout. If this is followed by a - // filename this means that file has been removed from the base layer. - // See https://github.com/opencontainers/image-spec/blob/main/layer.md#whiteouts - whiteoutPrefix = ".wh." - - // whiteoutMetaPrefix prefix means whiteout has a special meaning and is not - // for removing an actual file. Normally these files are excluded from exported - // archives. - whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix - - // whiteoutOpaqueDir file means directory has been made opaque - meaning - // readdir calls to this directory do not follow to lower layers. - whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" - - paxSchilyXattr = "SCHILY.xattr." - - userXattrPrefix = "user." -) - -// Apply applies a tar stream of an OCI style diff tar. -// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets -func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) { - root = filepath.Clean(root) - - var options ApplyOptions - for _, opt := range opts { - if err := opt(&options); err != nil { - return 0, fmt.Errorf("failed to apply option: %w", err) - } - } - if options.Filter == nil { - options.Filter = all - } - if options.applyFunc == nil { - options.applyFunc = applyNaive - } - - return options.applyFunc(ctx, root, r, options) -} - -// applyNaive applies a tar stream of an OCI style diff tar to a directory -// applying each file as either a whole file or whiteout. -// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets -func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) { - var ( - dirs []*tar.Header - - tr = tar.NewReader(r) - - // Used for handling opaque directory markers which - // may occur out of order - unpackedPaths = make(map[string]struct{}) - - convertWhiteout = options.ConvertWhiteout - ) - - if convertWhiteout == nil { - // handle whiteouts by removing the target files - convertWhiteout = func(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - if base == whiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return false, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - return false, err - } - - if strings.HasPrefix(base, whiteoutPrefix) { - originalBase := base[len(whiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - return false, os.RemoveAll(originalPath) - } - - return true, nil - } - } - - // Iterate through the files in the archive. 
- for { - select { - case <-ctx.Done(): - return 0, ctx.Err() - default: - } - - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - accept, err := options.Filter(hdr) - if err != nil { - return 0, err - } - if !accept { - continue - } - - if skipFile(hdr) { - log.G(ctx).Warnf("file %q ignored: archive may not be supported on system", hdr.Name) - continue - } - - // Split name and resolve symlinks for root directory. - ppath, base := filepath.Split(hdr.Name) - ppath, err = fs.RootPath(root, ppath) - if err != nil { - return 0, fmt.Errorf("failed to get root path: %w", err) - } - - // Join to root before joining to parent path to ensure relative links are - // already resolved based on the root before adding to parent. - path := filepath.Join(ppath, filepath.Join("/", base)) - if path == root { - log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name) - continue - } - - // If file is not directly under root, ensure parent directory - // exists or is created. - if ppath != root { - parentPath := ppath - if base == "" { - parentPath = filepath.Dir(path) - } - if err := mkparent(ctx, parentPath, root, options.Parents); err != nil { - return 0, err - } - } - - // Naive whiteout convert function which handles whiteout files by - // removing the target files. - if err := validateWhiteout(path); err != nil { - return 0, err - } - writeFile, err := convertWhiteout(hdr, path) - if err != nil { - return 0, fmt.Errorf("failed to convert whiteout file %q: %w", hdr.Name, err) - } - if !writeFile { - continue - } - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - srcData := io.Reader(tr) - srcHdr := hdr - - if err := createTarFile(ctx, path, root, srcHdr, srcData, options.NoSameOwner); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - - for _, hdr := range dirs { - path, err := fs.RootPath(root, hdr.Name) - if err != nil { - return 0, err - } - if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil { - return 0, err - } - } - - return size, nil -} - -func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader, noSameOwner bool) error { - // hdr.Mode is in linux format, which we can use for syscalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. 
- // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - //nolint:staticcheck // TypeRegA is deprecated but we may still receive an external tar with TypeRegA - case tar.TypeReg, tar.TypeRegA: - file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode()) - if err != nil { - return err - } - - _, err = copyBuffered(ctx, file, reader) - if err1 := file.Close(); err == nil { - err = err1 - } - if err != nil { - return err - } - - case tar.TypeBlock, tar.TypeChar: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath, err := hardlinkRootPath(extractDir, hdr.Linkname) - if err != nil { - return err - } - - if err := link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - log.G(ctx).Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) - } - - if !noSameOwner { - if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { - err = fmt.Errorf("failed to Lchown %q for UID %d, GID %d: %w", path, hdr.Uid, hdr.Gid, err) - if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { - err = fmt.Errorf("%w (Hint: try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)", err) - } - return err - } - } - - for key, value := range hdr.PAXRecords { - if strings.HasPrefix(key, paxSchilyXattr) { - key = key[len(paxSchilyXattr):] - if err := setxattr(path, key, value); err != nil { - if errors.Is(err, syscall.EPERM) && strings.HasPrefix(key, userXattrPrefix) { - // In the user.* namespace, only regular files and directories can have extended attributes. - // See https://man7.org/linux/man-pages/man7/xattr.7.html for details. 
- if fi, err := os.Lstat(path); err == nil && (!fi.Mode().IsRegular() && !fi.Mode().IsDir()) { - log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key) - continue - } - } - if errors.Is(err, syscall.ENOTSUP) { - log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key) - continue - } - return fmt.Errorf("failed to setxattr %q for key %q: %w", path, key, err) - } - } - } - - // call lchmod after lchown since lchown can modify the file mode - if err := lchmod(path, hdrInfo.Mode()); err != nil { - return err - } - - return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)) -} - -func mkparent(ctx context.Context, path, root string, parents []string) error { - if dir, err := os.Lstat(path); err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkparent", - Path: path, - Err: syscall.ENOTDIR, - } - } else if !os.IsNotExist(err) { - return err - } - - i := len(path) - for i > len(root) && !os.IsPathSeparator(path[i-1]) { - i-- - } - - if i > len(root)+1 { - if err := mkparent(ctx, path[:i-1], root, parents); err != nil { - return err - } - } - - if err := mkdir(path, 0755); err != nil { - // Check that still doesn't exist - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - - for _, p := range parents { - ppath, err := fs.RootPath(p, path[len(root):]) - if err != nil { - return err - } - - dir, err := os.Lstat(ppath) - if err == nil { - if !dir.IsDir() { - // Replaced, do not copy attributes - break - } - if err := copyDirInfo(dir, path); err != nil { - return err - } - return copyUpXAttrs(path, ppath) - } else if !os.IsNotExist(err) { - return err - } - } - - log.G(ctx).Debugf("parent directory %q not found: default permissions(0755) used", path) - - return nil -} - -// ChangeWriter provides tar stream from filesystem change information. -// The privided tar stream is styled as an OCI layer. Change information -// (add/modify/delete/unmodified) for each file needs to be passed to this -// writer through HandleChange method. -// -// This should be used combining with continuity's diff computing functionality -// (e.g. `fs.Change` of github.com/containerd/continuity/fs). -// -// See also https://github.com/opencontainers/image-spec/blob/main/layer.md for details -// about OCI layers -type ChangeWriter struct { - tw *tar.Writer - source string - modTimeUpperBound *time.Time - whiteoutT time.Time - inodeSrc map[uint64]string - inodeRefs map[uint64][]string - addedDirs map[string]struct{} -} - -// ChangeWriterOpt can be specified in NewChangeWriter. -type ChangeWriterOpt func(cw *ChangeWriter) - -// WithModTimeUpperBound sets the mod time upper bound. -func WithModTimeUpperBound(tm time.Time) ChangeWriterOpt { - return func(cw *ChangeWriter) { - cw.modTimeUpperBound = &tm - } -} - -// WithWhiteoutTime sets the whiteout timestamp. -func WithWhiteoutTime(tm time.Time) ChangeWriterOpt { - return func(cw *ChangeWriter) { - cw.whiteoutT = tm - } -} - -// NewChangeWriter returns ChangeWriter that writes tar stream of the source directory -// to the privided writer. Change information (add/modify/delete/unmodified) for each -// file needs to be passed through HandleChange method. -func NewChangeWriter(w io.Writer, source string, opts ...ChangeWriterOpt) *ChangeWriter { - cw := &ChangeWriter{ - tw: tar.NewWriter(w), - source: source, - whiteoutT: time.Now(), // can be overridden with WithWhiteoutTime(time.Time) ChangeWriterOpt . 
- inodeSrc: map[uint64]string{}, - inodeRefs: map[uint64][]string{}, - addedDirs: map[string]struct{}{}, - } - for _, o := range opts { - o(cw) - } - return cw -} - -// HandleChange receives filesystem change information and reflect that information to -// the result tar stream. This function implements `fs.ChangeFunc` of continuity -// (github.com/containerd/continuity/fs) and should be used with that package. -func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error { - if err != nil { - return err - } - if k == fs.ChangeKindDelete { - whiteOutDir := filepath.Dir(p) - whiteOutBase := filepath.Base(p) - whiteOut := filepath.Join(whiteOutDir, whiteoutPrefix+whiteOutBase) - hdr := &tar.Header{ - Typeflag: tar.TypeReg, - Name: whiteOut[1:], - Size: 0, - ModTime: cw.whiteoutT, - AccessTime: cw.whiteoutT, - ChangeTime: cw.whiteoutT, - } - if err := cw.includeParents(hdr); err != nil { - return err - } - if err := cw.tw.WriteHeader(hdr); err != nil { - return fmt.Errorf("failed to write whiteout header: %w", err) - } - } else { - var ( - link string - err error - source = filepath.Join(cw.source, p) - ) - - switch { - case f.Mode()&os.ModeSocket != 0: - return nil // ignore sockets - case f.Mode()&os.ModeSymlink != 0: - if link, err = os.Readlink(source); err != nil { - return err - } - } - - // Use FileInfoHeaderNoLookups to avoid propagating user names and group names from the host - hdr, err := tarheader.FileInfoHeaderNoLookups(f, link) - if err != nil { - return err - } - - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - // truncate timestamp for compatibility. without PAX stdlib rounds timestamps instead - hdr.Format = tar.FormatPAX - if cw.modTimeUpperBound != nil && hdr.ModTime.After(*cw.modTimeUpperBound) { - hdr.ModTime = *cw.modTimeUpperBound - } - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - - name := p - if strings.HasPrefix(name, string(filepath.Separator)) { - name, err = filepath.Rel(string(filepath.Separator), name) - if err != nil { - return fmt.Errorf("failed to make path relative: %w", err) - } - } - // Canonicalize to POSIX-style paths using forward slashes. Directory - // entries must end with a slash. 
- name = filepath.ToSlash(name) - if f.IsDir() && !strings.HasSuffix(name, "/") { - name += "/" - } - hdr.Name = name - - if err := setHeaderForSpecialDevice(hdr, name, f); err != nil { - return fmt.Errorf("failed to set device headers: %w", err) - } - - // additionalLinks stores file names which must be linked to - // this file when this file is added - var additionalLinks []string - inode, isHardlink := fs.GetLinkInfo(f) - if isHardlink { - // If the inode has a source, always link to it - if source, ok := cw.inodeSrc[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = source - hdr.Size = 0 - } else { - if k == fs.ChangeKindUnmodified { - cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name) - return nil - } - cw.inodeSrc[inode] = name - additionalLinks = cw.inodeRefs[inode] - delete(cw.inodeRefs, inode) - } - } else if k == fs.ChangeKindUnmodified { - // Nothing to write to diff - return nil - } - - if capability, err := getxattr(source, "security.capability"); err != nil { - return fmt.Errorf("failed to get capabilities xattr: %w", err) - } else if len(capability) > 0 { - if hdr.PAXRecords == nil { - hdr.PAXRecords = map[string]string{} - } - hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability) - } - - if err := cw.includeParents(hdr); err != nil { - return err - } - if err := cw.tw.WriteHeader(hdr); err != nil { - return fmt.Errorf("failed to write file header: %w", err) - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - file, err := open(source) - if err != nil { - return fmt.Errorf("failed to open path: %v: %w", source, err) - } - defer file.Close() - - n, err := copyBuffered(context.TODO(), cw.tw, file) - if err != nil { - return fmt.Errorf("failed to copy: %w", err) - } - if n != hdr.Size { - return errors.New("short write copying file") - } - } - - if additionalLinks != nil { - source = hdr.Name - for _, extra := range additionalLinks { - hdr.Name = extra - hdr.Typeflag = tar.TypeLink - hdr.Linkname = source - hdr.Size = 0 - - if err := cw.includeParents(hdr); err != nil { - return err - } - if err := cw.tw.WriteHeader(hdr); err != nil { - return fmt.Errorf("failed to write file header: %w", err) - } - } - } - } - return nil -} - -// Close closes this writer. 
-func (cw *ChangeWriter) Close() error { - if err := cw.tw.Close(); err != nil { - return fmt.Errorf("failed to close tar writer: %w", err) - } - return nil -} - -func (cw *ChangeWriter) includeParents(hdr *tar.Header) error { - if cw.addedDirs == nil { - return nil - } - name := strings.TrimRight(hdr.Name, "/") - fname := filepath.Join(cw.source, name) - parent := filepath.Dir(name) - pname := filepath.Join(cw.source, parent) - - // Do not include root directory as parent - if fname != cw.source && pname != cw.source { - _, ok := cw.addedDirs[parent] - if !ok { - cw.addedDirs[parent] = struct{}{} - fi, err := os.Stat(pname) - if err != nil { - return err - } - if err := cw.HandleChange(fs.ChangeKindModify, parent, fi, nil); err != nil { - return err - } - } - } - if hdr.Typeflag == tar.TypeDir { - cw.addedDirs[name] = struct{}{} - } - return nil -} - -func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) { - buf := bufPool.Get().(*[]byte) - defer bufPool.Put(buf) - - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - default: - } - - nr, er := src.Read(*buf) - if nr > 0 { - nw, ew := dst.Write((*buf)[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - if er != io.EOF { - err = er - } - break - } - } - return written, err - -} - -// hardlinkRootPath returns target linkname, evaluating and bounding any -// symlink to the parent directory. -// -// NOTE: Allow hardlink to the softlink, not the real one. For example, -// -// touch /tmp/zzz -// ln -s /tmp/zzz /tmp/xxx -// ln /tmp/xxx /tmp/yyy -// -// /tmp/yyy should be softlink which be same of /tmp/xxx, not /tmp/zzz. -func hardlinkRootPath(root, linkname string) (string, error) { - ppath, base := filepath.Split(linkname) - ppath, err := fs.RootPath(root, ppath) - if err != nil { - return "", err - } - - targetPath := filepath.Join(ppath, base) - if !strings.HasPrefix(targetPath, root) { - targetPath = root - } - return targetPath, nil -} - -func validateWhiteout(path string) error { - base := filepath.Base(path) - dir := filepath.Dir(path) - - if base == whiteoutOpaqueDir { - return nil - } - - if strings.HasPrefix(base, whiteoutPrefix) { - originalBase := base[len(whiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - // Ensure originalPath is under dir - if dir[len(dir)-1] != filepath.Separator { - dir += string(filepath.Separator) - } - if !strings.HasPrefix(originalPath, dir) { - return fmt.Errorf("invalid whiteout name: %v: %w", base, errInvalidArchive) - } - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/archive/tar_freebsd.go b/vendor/github.com/containerd/containerd/archive/tar_freebsd.go deleted file mode 100644 index fb5abff0ec23..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tar_freebsd.go +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package archive - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// mknod wraps unix.Mknod. FreeBSD's unix.Mknod signature is different from -// other Unix and Unix-like operating systems. -func mknod(path string, mode uint32, dev uint64) error { - return unix.Mknod(path, mode, dev) -} - -// lsetxattrCreate wraps unix.Lsetxattr with FreeBSD-specific flags and errors -func lsetxattrCreate(link string, attr string, data []byte) error { - err := unix.Lsetxattr(link, attr, data, 0) - if err == unix.ENOTSUP || err == unix.EEXIST { - return nil - } - return err -} - -func lchmod(path string, mode os.FileMode) error { - err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW) - if err != nil { - err = &os.PathError{Op: "lchmod", Path: path, Err: err} - } - return err -} diff --git a/vendor/github.com/containerd/containerd/archive/tar_mostunix.go b/vendor/github.com/containerd/containerd/archive/tar_mostunix.go deleted file mode 100644 index ca5e602a0311..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tar_mostunix.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build !windows && !freebsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// mknod wraps Unix.Mknod and casts dev to int -func mknod(path string, mode uint32, dev uint64) error { - return unix.Mknod(path, mode, int(dev)) -} - -// lsetxattrCreate wraps unix.Lsetxattr, passes the unix.XATTR_CREATE flag on -// supported operating systems,and ignores appropriate errors -func lsetxattrCreate(link string, attr string, data []byte) error { - err := unix.Lsetxattr(link, attr, data, unix.XATTR_CREATE) - if err == unix.ENOTSUP || err == unix.ENODATA || err == unix.EEXIST { - return nil - } - return err -} - -// lchmod checks for symlink and changes the mode if not a symlink -func lchmod(path string, mode os.FileMode) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - if fi.Mode()&os.ModeSymlink == 0 { - if err := os.Chmod(path, mode); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts.go b/vendor/github.com/containerd/containerd/archive/tar_opts.go deleted file mode 100644 index ac3a03c8d4a5..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tar_opts.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package archive - -import ( - "archive/tar" - "context" - "io" - "time" -) - -// ApplyOptions provides additional options for an Apply operation -type ApplyOptions struct { - Filter Filter // Filter tar headers - ConvertWhiteout ConvertWhiteout // Convert whiteout files - Parents []string // Parent directories to handle inherited attributes without CoW - NoSameOwner bool // NoSameOwner will not attempt to preserve the owner specified in the tar archive. - - applyFunc func(context.Context, string, io.Reader, ApplyOptions) (int64, error) -} - -// ApplyOpt allows setting mutable archive apply properties on creation -type ApplyOpt func(options *ApplyOptions) error - -// Filter specific files from the archive -type Filter func(*tar.Header) (bool, error) - -// ConvertWhiteout converts whiteout files from the archive -type ConvertWhiteout func(*tar.Header, string) (bool, error) - -// all allows all files -func all(_ *tar.Header) (bool, error) { - return true, nil -} - -// WithFilter uses the filter to select which files are to be extracted. -func WithFilter(f Filter) ApplyOpt { - return func(options *ApplyOptions) error { - options.Filter = f - return nil - } -} - -// WithConvertWhiteout uses the convert function to convert the whiteout files. -func WithConvertWhiteout(c ConvertWhiteout) ApplyOpt { - return func(options *ApplyOptions) error { - options.ConvertWhiteout = c - return nil - } -} - -// WithNoSameOwner is same as '--no-same-owner` in 'tar' command. -// It'll skip attempt to preserve the owner specified in the tar archive. -func WithNoSameOwner() ApplyOpt { - return func(options *ApplyOptions) error { - options.NoSameOwner = true - return nil - } -} - -// WithParents provides parent directories for resolving inherited attributes -// directory from the filesystem. -// Inherited attributes are searched from first to last, making the first -// element in the list the most immediate parent directory. -// NOTE: When applying to a filesystem which supports CoW, file attributes -// should be inherited by the filesystem. -func WithParents(p []string) ApplyOpt { - return func(options *ApplyOptions) error { - options.Parents = p - return nil - } -} - -// WriteDiffOptions provides additional options for a WriteDiff operation -type WriteDiffOptions struct { - ParentLayers []string // Windows needs the full list of parent layers - - writeDiffFunc func(context.Context, io.Writer, string, string, WriteDiffOptions) error - - // SourceDateEpoch specifies the following timestamps to provide control for reproducibility. - // - The upper bound timestamp of the diff contents - // - The timestamp of the whiteouts - // - // See also https://reproducible-builds.org/docs/source-date-epoch/ . - SourceDateEpoch *time.Time -} - -// WriteDiffOpt allows setting mutable archive write properties on creation -type WriteDiffOpt func(options *WriteDiffOptions) error - -// WithSourceDateEpoch specifies the SOURCE_DATE_EPOCH without touching the env vars. -func WithSourceDateEpoch(tm *time.Time) WriteDiffOpt { - return func(options *WriteDiffOptions) error { - options.SourceDateEpoch = tm - return nil - } -} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go b/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go deleted file mode 100644 index f88d826e4991..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "golang.org/x/sys/unix" -) - -// AufsConvertWhiteout converts whiteout files for aufs. -func AufsConvertWhiteout(_ *tar.Header, _ string) (bool, error) { - return true, nil -} - -// OverlayConvertWhiteout converts whiteout files for overlay. -func OverlayConvertWhiteout(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque, we need to translate that to overlay - if base == whiteoutOpaqueDir { - // don't write the file itself - return false, unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, whiteoutPrefix) { - originalBase := base[len(whiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, err - } - // don't write the file itself - return false, os.Chown(originalPath, hdr.Uid, hdr.Gid) - } - - return true, nil -} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go b/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go deleted file mode 100644 index 0ba3cd082287..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "context" - "io" - - "github.com/Microsoft/hcsshim/pkg/ociwclayer" -) - -// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer -// See https://github.com/opencontainers/image-spec/blob/main/layer.md#applying-changesets -func applyWindowsLayer(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) { - return ociwclayer.ImportLayerFromTar(ctx, r, root, options.Parents) -} - -// AsWindowsContainerLayer indicates that the tar stream to apply is that of -// a Windows Container Layer. The caller must be holding SeBackupPrivilege and -// SeRestorePrivilege. 
-func AsWindowsContainerLayer() ApplyOpt { - return func(options *ApplyOptions) error { - options.applyFunc = applyWindowsLayer - return nil - } -} - -// writeDiffWindowsLayers writes a tar stream of the computed difference between the -// provided Windows layers -// -// Produces a tar using OCI style file markers for deletions. Deleted -// files will be prepended with the prefix ".wh.". This style is -// based off AUFS whiteouts. -// See https://github.com/opencontainers/image-spec/blob/main/layer.md -func writeDiffWindowsLayers(ctx context.Context, w io.Writer, _, layer string, options WriteDiffOptions) error { - return ociwclayer.ExportLayerToTar(ctx, w, layer, options.ParentLayers) -} - -// AsWindowsContainerLayerPair indicates that the paths to diff are a pair of -// Windows Container Layers. The caller must be holding SeBackupPrivilege. -func AsWindowsContainerLayerPair() WriteDiffOpt { - return func(options *WriteDiffOptions) error { - options.writeDiffFunc = writeDiffWindowsLayers - return nil - } -} - -// WithParentLayers provides the Windows Container Layers that are the parents -// of the target (right-hand, "upper") layer, if any. The source (left-hand, "lower") -// layer passed to WriteDiff should be "" in this case. -func WithParentLayers(p []string) WriteDiffOpt { - return func(options *WriteDiffOptions) error { - options.ParentLayers = p - return nil - } -} diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go deleted file mode 100644 index fa611006098f..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ /dev/null @@ -1,203 +0,0 @@ -//go:build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "archive/tar" - "errors" - "fmt" - "os" - "runtime" - "strings" - "syscall" - - "github.com/moby/sys/userns" - "golang.org/x/sys/unix" - - "github.com/containerd/continuity/fs" - "github.com/containerd/continuity/sysx" -) - -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error { - // Devmajor and Devminor are only needed for special devices. - - // In FreeBSD, RDev for regular files is -1 (unless overridden by FS): - // https://cgit.freebsd.org/src/tree/sys/kern/vfs_default.c?h=stable/13#n1531 - // (NODEV is -1: https://cgit.freebsd.org/src/tree/sys/sys/param.h?h=stable/13#n241). - - // ZFS in particular does not override the default: - // https://cgit.freebsd.org/src/tree/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c?h=stable/13#n2027 - - // Since `Stat_t.Rdev` is uint64, the cast turns -1 into (2^64 - 1). - // Such large values cannot be encoded in a tar header. 
- if runtime.GOOS == "freebsd" && hdr.Typeflag != tar.TypeBlock && hdr.Typeflag != tar.TypeChar { - return nil - } - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - - rdev := uint64(s.Rdev) //nolint:nolintlint,unconvert // rdev is int32 on darwin/bsd, int64 on linux/solaris - - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(rdev)) - hdr.Devminor = int64(unix.Minor(rdev)) - } - - return nil -} - -func open(p string) (*os.File, error) { - return os.Open(p) -} - -func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { - f, err := os.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - // Call chmod to avoid permission mask - if err := os.Chmod(name, perm); err != nil { - f.Close() - return nil, err - } - return f, err -} - -func mkdir(path string, perm os.FileMode) error { - if err := os.Mkdir(path, perm); err != nil { - return err - } - // Only final created directory gets explicit permission - // call to avoid permission mask - return os.Chmod(path, perm) -} - -func skipFile(hdr *tar.Header) bool { - switch hdr.Typeflag { - case tar.TypeBlock, tar.TypeChar: - // cannot create a device if running in user namespace - return userns.RunningInUserNS() - default: - return false - } -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo. -// This function must not be called for Block and Char when running in userns. -// (skipFile() should return true for them.) -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor))) -} - -func getxattr(path, attr string) ([]byte, error) { - b, err := sysx.LGetxattr(path, attr) - if err == unix.ENOTSUP || err == sysx.ENODATA { - return nil, nil - } - return b, err -} - -func setxattr(path, key, value string) error { - // Do not set trusted attributes - if strings.HasPrefix(key, "trusted.") { - return fmt.Errorf("admin attributes from archive not supported: %w", unix.ENOTSUP) - } - return unix.Lsetxattr(path, key, []byte(value), 0) -} - -func copyDirInfo(fi os.FileInfo, path string) error { - st := fi.Sys().(*syscall.Stat_t) - if err := os.Lchown(path, int(st.Uid), int(st.Gid)); err != nil { - if os.IsPermission(err) { - // Normally if uid/gid are the same this would be a no-op, but some - // filesystems may still return EPERM... for instance NFS does this. - // In such a case, this is not an error. 
- if dstStat, err2 := os.Lstat(path); err2 == nil { - st2 := dstStat.Sys().(*syscall.Stat_t) - if st.Uid == st2.Uid && st.Gid == st2.Gid { - err = nil - } - } - } - if err != nil { - return fmt.Errorf("failed to chown %s: %w", path, err) - } - } - - if err := os.Chmod(path, fi.Mode()); err != nil { - return fmt.Errorf("failed to chmod %s: %w", path, err) - } - - timespec := []unix.Timespec{ - unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatAtime(st))), - unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatMtime(st))), - } - if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return fmt.Errorf("failed to utime %s: %w", path, err) - } - - return nil -} - -func copyUpXAttrs(dst, src string) error { - xattrKeys, err := sysx.LListxattr(src) - if err != nil { - if err == unix.ENOTSUP || err == sysx.ENODATA { - return nil - } - return fmt.Errorf("failed to list xattrs on %s: %w", src, err) - } - for _, xattr := range xattrKeys { - // Do not copy up trusted attributes - if strings.HasPrefix(xattr, "trusted.") { - continue - } - data, err := sysx.LGetxattr(src, xattr) - if err != nil { - if err == unix.ENOTSUP || err == sysx.ENODATA { - continue - } - return fmt.Errorf("failed to get xattr %q on %s: %w", xattr, src, err) - } - if err := lsetxattrCreate(dst, xattr, data); err != nil { - return fmt.Errorf("failed to set xattr %q on %s: %w", xattr, dst, err) - } - } - - return nil -} diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go deleted file mode 100644 index dc8f64ee4124..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tar_windows.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "archive/tar" - "errors" - "fmt" - "os" - "strings" - - "github.com/moby/sys/sequential" -) - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setHeaderForSpecialDevice(*tar.Header, string, os.FileInfo) error { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows - return nil -} - -func open(p string) (*os.File, error) { - // We use sequential file access to avoid depleting the standby list on - // Windows. - return sequential.Open(p) -} - -func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { - // Source is regular file. We use sequential file access to avoid depleting - // the standby list on Windows. - return sequential.OpenFile(name, flag, perm) -} - -func mkdir(path string, perm os.FileMode) error { - return os.Mkdir(path, perm) -} - -func skipFile(hdr *tar.Header) bool { - // Windows does not support filenames with colons in them. Ignore - // these files. 
This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - return strings.Contains(hdr.Name, ":") -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func lchmod(path string, mode os.FileMode) error { - return nil -} - -func getxattr(path, attr string) ([]byte, error) { - return nil, nil -} - -func setxattr(path, key, value string) error { - // Return not support error, do not wrap underlying not supported - // since xattrs should not exist in windows diff archives - return errors.New("xattrs not supported on Windows") -} - -func copyDirInfo(fi os.FileInfo, path string) error { - if err := os.Chmod(path, fi.Mode()); err != nil { - return fmt.Errorf("failed to chmod %s: %w", path, err) - } - return nil -} - -func copyUpXAttrs(dst, src string) error { - return nil -} diff --git a/vendor/github.com/containerd/containerd/archive/tarheader/tarheader.go b/vendor/github.com/containerd/containerd/archive/tarheader/tarheader.go deleted file mode 100644 index 2f93842c1956..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tarheader/tarheader.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - Portions from https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L419-L464 - Copyright (C) Docker/Moby authors. - Licensed under the Apache License, Version 2.0 - NOTICE: https://github.com/moby/moby/blob/v23.0.1/NOTICE -*/ - -package tarheader - -import ( - "archive/tar" - "os" -) - -// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to -// prevent tar.FileInfoHeader from introspecting it and potentially calling into -// glibc. -// -// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L419-L434 . -type nosysFileInfo struct { - os.FileInfo -} - -func (fi nosysFileInfo) Sys() interface{} { - // A Sys value of type *tar.Header is safe as it is system-independent. - // The tar.FileInfoHeader function copies the fields into the returned - // header without performing any OS lookups. 
- if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok { - return sys - } - return nil -} - -// sysStat, if non-nil, populates hdr from system-dependent fields of fi. -// -// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L436-L437 . -var sysStat func(fi os.FileInfo, hdr *tar.Header) error - -// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi. -// -// Compared to the archive/tar.FileInfoHeader function, this function is safe to -// call from a chrooted process as it does not populate fields which would -// require operating system lookups. It behaves identically to -// tar.FileInfoHeader when fi is a FileInfo value returned from -// tar.Header.FileInfo(). -// -// When fi is a FileInfo for a native file, such as returned from os.Stat() and -// os.Lstat(), the returned Header value differs from one returned from -// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not -// set as OS lookups would be required to populate them. The AccessTime and -// ChangeTime fields are not currently set (not yet implemented) although that -// is subject to change. Callers which require the AccessTime or ChangeTime -// fields to be zeroed should explicitly zero them out in the returned Header -// value to avoid any compatibility issues in the future. -// -// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive.go#L439-L464 . -func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link) - if err != nil { - return nil, err - } - if sysStat != nil { - return hdr, sysStat(fi, hdr) - } - return hdr, nil -} diff --git a/vendor/github.com/containerd/containerd/archive/tarheader/tarheader_unix.go b/vendor/github.com/containerd/containerd/archive/tarheader/tarheader_unix.go deleted file mode 100644 index 98ad8f9451c9..000000000000 --- a/vendor/github.com/containerd/containerd/archive/tarheader/tarheader_unix.go +++ /dev/null @@ -1,59 +0,0 @@ -//go:build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -/* - Portions from https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive_unix.go#L52-L70 - Copyright (C) Docker/Moby authors. - Licensed under the Apache License, Version 2.0 - NOTICE: https://github.com/moby/moby/blob/v23.0.1/NOTICE -*/ - -package tarheader - -import ( - "archive/tar" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -func init() { - sysStat = statUnix -} - -// statUnix populates hdr from system-dependent fields of fi without performing -// any OS lookups. 
-// From https://github.com/moby/moby/blob/v23.0.1/pkg/archive/archive_unix.go#L52-L70 -func statUnix(fi os.FileInfo, hdr *tar.Header) error { - s, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - - hdr.Uid = int(s.Uid) - hdr.Gid = int(s.Gid) - - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) - } - - return nil -} diff --git a/vendor/github.com/containerd/containerd/archive/time.go b/vendor/github.com/containerd/containerd/archive/time.go deleted file mode 100644 index 16651a4d0571..000000000000 --- a/vendor/github.com/containerd/containerd/archive/time.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "syscall" - "time" - "unsafe" -) - -var ( - minTime = time.Unix(0, 0) - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - -func boundTime(t time.Time) time.Time { - if t.Before(minTime) || t.After(maxTime) { - return minTime - } - - return t -} - -func latestTime(t1, t2 time.Time) time.Time { - if t1.Before(t2) { - return t2 - } - return t1 -} diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go deleted file mode 100644 index b5a10d528fa1..000000000000 --- a/vendor/github.com/containerd/containerd/archive/time_unix.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package archive - -import ( - "fmt" - "time" - - "golang.org/x/sys/unix" -) - -func chtimes(path string, atime, mtime time.Time) error { - var utimes [2]unix.Timespec - utimes[0] = unix.NsecToTimespec(atime.UnixNano()) - utimes[1] = unix.NsecToTimespec(mtime.UnixNano()) - - if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { - return fmt.Errorf("failed call to UtimesNanoAt for %s: %w", path, err) - } - - return nil -} diff --git a/vendor/github.com/containerd/containerd/archive/time_windows.go b/vendor/github.com/containerd/containerd/archive/time_windows.go deleted file mode 100644 index d906a3e5e764..000000000000 --- a/vendor/github.com/containerd/containerd/archive/time_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package archive - -import ( - "time" - - "golang.org/x/sys/windows" -) - -// chtimes will set the create time on a file using the given modtime. -// This requires calling SetFileTime and explicitly including the create time. -func chtimes(path string, atime, mtime time.Time) error { - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return err - } - h, err := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if err != nil { - return err - } - defer windows.Close(h) - c := windows.NsecToFiletime(mtime.UnixNano()) - return windows.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/containerd/containerd/content/adaptor.go b/vendor/github.com/containerd/containerd/content/adaptor.go deleted file mode 100644 index 88bad2610e8a..000000000000 --- a/vendor/github.com/containerd/containerd/content/adaptor.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "strings" - - "github.com/containerd/containerd/filters" -) - -// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. 
-func AdaptInfo(info Info) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "digest": - return info.Digest.String(), true - case "size": - // TODO: support size based filtering - case "labels": - return checkMap(fieldpath[1:], info.Labels) - } - - return "", false - }) -} - -func checkMap(fieldpath []string, m map[string]string) (string, bool) { - if len(m) == 0 { - return "", false - } - - value, ok := m[strings.Join(fieldpath, ".")] - return value, ok -} diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go deleted file mode 100644 index 2dc7bf8b52d8..000000000000 --- a/vendor/github.com/containerd/containerd/content/content.go +++ /dev/null @@ -1,207 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "context" - "io" - "time" - - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Store combines the methods of content-oriented interfaces into a set that -// are commonly provided by complete implementations. -// -// Overall content lifecycle: -// - Ingester is used to initiate a write operation (aka ingestion) -// - IngestManager is used to manage (e.g. list, abort) active ingestions -// - Once an ingestion is complete (see Writer.Commit), Provider is used to -// query a single piece of content by its digest -// - Manager is used to manage (e.g. list, delete) previously committed content -// -// Note that until ingestion is complete, its content is not visible through -// Provider or Manager. Once ingestion is complete, it is no longer exposed -// through IngestManager. -type Store interface { - Manager - Provider - IngestManager - Ingester -} - -// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer -type ReaderAt interface { - io.ReaderAt - io.Closer - Size() int64 -} - -// Provider provides a reader interface for specific content -type Provider interface { - // ReaderAt only requires desc.Digest to be set. - // Other fields in the descriptor may be used internally for resolving - // the location of the actual data. - ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) -} - -// Ingester writes content -type Ingester interface { - // Writer initiates a writing operation (aka ingestion). A single ingestion - // is uniquely identified by its ref, provided using a WithRef option. - // Writer can be called multiple times with the same ref to access the same - // ingestion. - // Once all the data is written, use Writer.Commit to complete the ingestion. - Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) -} - -// IngestManager provides methods for managing ingestions. An ingestion is a -// not-yet-complete writing operation initiated using Ingester and identified -// by a ref string. 
-type IngestManager interface { - // Status returns the status of the provided ref. - Status(ctx context.Context, ref string) (Status, error) - - // ListStatuses returns the status of any active ingestions whose ref match - // the provided regular expression. If empty, all active ingestions will be - // returned. - ListStatuses(ctx context.Context, filters ...string) ([]Status, error) - - // Abort completely cancels the ingest operation targeted by ref. - Abort(ctx context.Context, ref string) error -} - -// Info holds content specific information -type Info struct { - Digest digest.Digest - Size int64 - CreatedAt time.Time - UpdatedAt time.Time - Labels map[string]string -} - -// Status of a content operation (i.e. an ingestion) -type Status struct { - Ref string - Offset int64 - Total int64 - Expected digest.Digest - StartedAt time.Time - UpdatedAt time.Time -} - -// WalkFunc defines the callback for a blob walk. -type WalkFunc func(Info) error - -// InfoReaderProvider provides both info and reader for the specific content. -type InfoReaderProvider interface { - InfoProvider - Provider -} - -// InfoProvider provides info for content inspection. -type InfoProvider interface { - // Info will return metadata about content available in the content store. - // - // If the content is not present, ErrNotFound will be returned. - Info(ctx context.Context, dgst digest.Digest) (Info, error) -} - -// Manager provides methods for inspecting, listing and removing content. -type Manager interface { - InfoProvider - - // Update updates mutable information related to content. - // If one or more fieldpaths are provided, only those - // fields will be updated. - // Mutable fields: - // labels.* - Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) - - // Walk will call fn for each item in the content store which - // match the provided filters. If no filters are given all - // items will be walked. - Walk(ctx context.Context, fn WalkFunc, filters ...string) error - - // Delete removes the content from the store. - Delete(ctx context.Context, dgst digest.Digest) error -} - -// Writer handles writing of content into a content store -type Writer interface { - // Close closes the writer, if the writer has not been - // committed this allows resuming or aborting. - // Calling Close on a closed writer will not error. - io.WriteCloser - - // Digest may return empty digest or panics until committed. - Digest() digest.Digest - - // Commit commits the blob (but no roll-back is guaranteed on an error). - // size and expected can be zero-value when unknown. - // Commit always closes the writer, even on error. - // ErrAlreadyExists aborts the writer. - Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error - - // Status returns the current state of write - Status() (Status, error) - - // Truncate updates the size of the target blob - Truncate(size int64) error -} - -// Opt is used to alter the mutable properties of content -type Opt func(*Info) error - -// WithLabels allows labels to be set on content -func WithLabels(labels map[string]string) Opt { - return func(info *Info) error { - info.Labels = labels - return nil - } -} - -// WriterOpts is internally used by WriterOpt. -type WriterOpts struct { - Ref string - Desc ocispec.Descriptor -} - -// WriterOpt is used for passing options to Ingester.Writer. -type WriterOpt func(*WriterOpts) error - -// WithDescriptor specifies an OCI descriptor. 
-// Writer may optionally use the descriptor internally for resolving -// the location of the actual data. -// Write does not require any field of desc to be set. -// If the data size is unknown, desc.Size should be set to 0. -// Some implementations may also accept negative values as "unknown". -func WithDescriptor(desc ocispec.Descriptor) WriterOpt { - return func(opts *WriterOpts) error { - opts.Desc = desc - return nil - } -} - -// WithRef specifies a ref string. -func WithRef(ref string) WriterOpt { - return func(opts *WriterOpts) error { - opts.Ref = ref - return nil - } -} diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go deleted file mode 100644 index 93bcdde106e4..000000000000 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ /dev/null @@ -1,340 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "context" - "errors" - "fmt" - "io" - "sync" - "time" - - "github.com/containerd/log" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/pkg/randutil" -) - -var ErrReset = errors.New("writer has been reset") - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -type reader interface { - Reader() io.Reader -} - -// NewReader returns a io.Reader from a ReaderAt -func NewReader(ra ReaderAt) io.Reader { - if rd, ok := ra.(reader); ok { - return rd.Reader() - } - return io.NewSectionReader(ra, 0, ra.Size()) -} - -// ReadBlob retrieves the entire contents of the blob from the provider. -// -// Avoid using this for large blobs, such as layers. -func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { - if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest { - return desc.Data, nil - } - - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - defer ra.Close() - - p := make([]byte, ra.Size()) - - n, err := ra.ReadAt(p, 0) - if err == io.EOF { - if int64(n) != ra.Size() { - err = io.ErrUnexpectedEOF - } else { - err = nil - } - } - return p, err -} - -// WriteBlob writes data with the expected digest into the content store. If -// expected already exists, the method returns immediately and the reader will -// not be consumed. -// -// This is useful when the digest and size are known beforehand. -// -// Copy is buffered, so no need to wrap reader in buffered io. 
-func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error { - cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return fmt.Errorf("failed to open writer: %w", err) - } - - return nil // already present - } - defer cw.Close() - - return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) -} - -// OpenWriter opens a new writer for the given reference, retrying if the writer -// is locked until the reference is available or returns an error. -func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) { - var ( - cw Writer - err error - retry = 16 - ) - for { - cw, err = cs.Writer(ctx, opts...) - if err != nil { - if !errdefs.IsUnavailable(err) { - return nil, err - } - - // TODO: Check status to determine if the writer is active, - // continue waiting while active, otherwise return lock - // error or abort. Requires asserting for an ingest manager - - select { - case <-time.After(time.Millisecond * time.Duration(randutil.Intn(retry))): - if retry < 2048 { - retry = retry << 1 - } - continue - case <-ctx.Done(): - // Propagate lock error - return nil, err - } - - } - break - } - - return cw, err -} - -// Copy copies data with the expected digest from the reader into the -// provided content store writer. This copy commits the writer. -// -// This is useful when the digest and size are known beforehand. When -// the size or digest is unknown, these values may be empty. -// -// Copy is buffered, so no need to wrap reader in buffered io. -func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error { - ws, err := cw.Status() - if err != nil { - return fmt.Errorf("failed to get status: %w", err) - } - r := or - if ws.Offset > 0 { - r, err = seekReader(or, ws.Offset, size) - if err != nil { - return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) - } - } - - for i := 0; ; i++ { - if i >= 1 { - log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset") - } - copied, err := copyWithBuffer(cw, r) - if errors.Is(err, ErrReset) { - ws, err := cw.Status() - if err != nil { - return fmt.Errorf("failed to get status: %w", err) - } - r, err = seekReader(or, ws.Offset, size) - if err != nil { - return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) - } - continue - } - if err != nil { - return fmt.Errorf("failed to copy: %w", err) - } - if size != 0 && copied < size-ws.Offset { - // Short writes would return its own error, this indicates a read failure - return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) - } - if err := cw.Commit(ctx, size, expected, opts...); err != nil { - if errors.Is(err, ErrReset) { - ws, err := cw.Status() - if err != nil { - return fmt.Errorf("failed to get status: %w", err) - } - r, err = seekReader(or, ws.Offset, size) - if err != nil { - return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) - } - continue - } - if !errdefs.IsAlreadyExists(err) { - return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) - } - } - return nil - } -} - -// CopyReaderAt copies to a writer from a given reader at for the given -// number of bytes. This copy does not commit the writer. 
-func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { - ws, err := cw.Status() - if err != nil { - return err - } - - copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) - if err != nil { - return fmt.Errorf("failed to copy: %w", err) - } - if copied < n { - // Short writes would return its own error, this indicates a read failure - return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) - } - return nil -} - -// CopyReader copies to a writer from a given reader, returning -// the number of bytes copied. -// Note: if the writer has a non-zero offset, the total number -// of bytes read may be greater than those copied if the reader -// is not an io.Seeker. -// This copy does not commit the writer. -func CopyReader(cw Writer, r io.Reader) (int64, error) { - ws, err := cw.Status() - if err != nil { - return 0, fmt.Errorf("failed to get status: %w", err) - } - - if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, 0) - if err != nil { - return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err) - } - } - - return copyWithBuffer(cw, r) -} - -// seekReader attempts to seek the reader to the given offset, either by -// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding -// up to the given offset. -func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { - // attempt to resolve r as a seeker and setup the offset. - seeker, ok := r.(io.Seeker) - if ok { - nn, err := seeker.Seek(offset, io.SeekStart) - if nn != offset { - if err == nil { - err = fmt.Errorf("unexpected seek location without seek error") - } - return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err) - } - - if err != nil { - return nil, err - } - - return r, nil - } - - // ok, let's try io.ReaderAt! - readerAt, ok := r.(io.ReaderAt) - if ok && size > offset { - sr := io.NewSectionReader(readerAt, offset, size) - return sr, nil - } - - // well then, let's just discard up to the offset - n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset)) - if err != nil { - return nil, fmt.Errorf("failed to discard to offset: %w", err) - } - if n != offset { - return nil, errors.New("unable to discard to offset") - } - - return r, nil -} - -// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer -// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have -// a full buffer before we do a write operation to dst to reduce overheads associated -// with the write operations of small buffers. -func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { - // If the reader has a WriteTo method, use it to do the copy. - // Avoids an allocation and a copy. - if wt, ok := src.(io.WriterTo); ok { - return wt.WriteTo(dst) - } - // Similarly, if the writer has a ReadFrom method, use it to do the copy. - if rt, ok := dst.(io.ReaderFrom); ok { - return rt.ReadFrom(src) - } - bufRef := bufPool.Get().(*[]byte) - defer bufPool.Put(bufRef) - buf := *bufRef - for { - nr, er := io.ReadAtLeast(src, buf, len(buf)) - if nr > 0 { - nw, ew := dst.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - // If an EOF happens after reading fewer than the requested bytes, - // ReadAtLeast returns ErrUnexpectedEOF. 
- if er != io.EOF && er != io.ErrUnexpectedEOF { - err = er - } - break - } - } - return -} - -// Exists returns whether an attempt to access the content would not error out -// with an ErrNotFound error. It will return an encountered error if it was -// different than ErrNotFound. -func Exists(ctx context.Context, provider InfoProvider, desc ocispec.Descriptor) (bool, error) { - _, err := provider.Info(ctx, desc.Digest) - if errdefs.IsNotFound(err) { - return false, nil - } - return err == nil, err -} diff --git a/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go b/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go deleted file mode 100644 index a523f28d9114..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build gofuzz - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "bufio" - "bytes" - "context" - _ "crypto/sha256" - "io" - "testing" - - "github.com/opencontainers/go-digest" - - "github.com/containerd/containerd/content" -) - -func FuzzContentStoreWriter(data []byte) int { - t := &testing.T{} - ctx := context.Background() - ctx, _, cs, cleanup := contentStoreEnv(t) - defer cleanup() - - cw, err := cs.Writer(ctx, content.WithRef("myref")) - if err != nil { - return 0 - } - if err := cw.Close(); err != nil { - return 0 - } - - // reopen, so we can test things - cw, err = cs.Writer(ctx, content.WithRef("myref")) - if err != nil { - return 0 - } - - err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data)))) - if err != nil { - return 0 - } - expected := digest.FromBytes(data) - - if err = cw.Commit(ctx, int64(len(data)), expected); err != nil { - return 0 - } - return 1 -} - -func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error { - nn, err := io.Copy(dst, src) - if err != nil { - return err - } - - if nn != size { - return err - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go deleted file mode 100644 index 1e59f39b30dd..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/locks.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package local - -import ( - "fmt" - "sync" - "time" - - "github.com/containerd/containerd/errdefs" -) - -// Handles locking references - -type lock struct { - since time.Time -} - -var ( - // locks lets us lock in process - locks = make(map[string]*lock) - locksMu sync.Mutex -) - -func tryLock(ref string) error { - locksMu.Lock() - defer locksMu.Unlock() - - if v, ok := locks[ref]; ok { - // Returning the duration may help developers distinguish dead locks (long duration) from - // lock contentions (short duration). - now := time.Now() - return fmt.Errorf( - "ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since, - errdefs.ErrUnavailable, - ) - } - - locks[ref] = &lock{time.Now()} - return nil -} - -func unlock(ref string) { - locksMu.Lock() - defer locksMu.Unlock() - - delete(locks, ref) -} diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go deleted file mode 100644 index 899e85c0ba87..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/readerat.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "fmt" - "io" - "os" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" -) - -// readerat implements io.ReaderAt in a completely stateless manner by opening -// the referenced file for each call to ReadAt. -type sizeReaderAt struct { - size int64 - fp *os.File -} - -// OpenReader creates ReaderAt from a file -func OpenReader(p string) (content.ReaderAt, error) { - fi, err := os.Stat(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) - } - - fp, err := os.Open(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound) - } - - return sizeReaderAt{size: fi.Size(), fp: fp}, nil -} - -func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { - return ra.fp.ReadAt(p, offset) -} - -func (ra sizeReaderAt) Size() int64 { - return ra.size -} - -func (ra sizeReaderAt) Close() error { - return ra.fp.Close() -} - -func (ra sizeReaderAt) Reader() io.Reader { - return io.LimitReader(ra.fp, ra.size) -} diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go deleted file mode 100644 index efe886014cd3..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ /dev/null @@ -1,721 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/containerd/log" - "github.com/sirupsen/logrus" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/filters" - "github.com/containerd/containerd/pkg/randutil" - - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -// LabelStore is used to store mutable labels for digests -type LabelStore interface { - // Get returns all the labels for the given digest - Get(digest.Digest) (map[string]string, error) - - // Set sets all the labels for a given digest - Set(digest.Digest, map[string]string) error - - // Update replaces the given labels for a digest, - // a key with an empty value removes a label. - Update(digest.Digest, map[string]string) (map[string]string, error) -} - -// Store is digest-keyed store for content. All data written into the store is -// stored under a verifiable digest. -// -// Store can generally support multi-reader, single-writer ingest of data, -// including resumable ingest. -type store struct { - root string - ls LabelStore - - ensureIngestRootOnce func() error -} - -// NewStore returns a local content store -func NewStore(root string) (content.Store, error) { - return NewLabeledStore(root, nil) -} - -// NewLabeledStore returns a new content store using the provided label store -// -// Note: content stores which are used underneath a metadata store may not -// require labels and should use `NewStore`. `NewLabeledStore` is primarily -// useful for tests or standalone implementations. -func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { - s := &store{ - root: root, - ls: ls, - } - - s.ensureIngestRootOnce = sync.OnceValue(s.ensureIngestRoot) - return s, nil -} - -func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - p, err := s.blobPath(dgst) - if err != nil { - return content.Info{}, fmt.Errorf("calculating blob info path: %w", err) - } - - fi, err := os.Stat(p) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) - } - - return content.Info{}, err - } - var labels map[string]string - if s.ls != nil { - labels, err = s.ls.Get(dgst) - if err != nil { - return content.Info{}, err - } - } - return s.info(dgst, fi, labels), nil -} - -func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { - return content.Info{ - Digest: dgst, - Size: fi.Size(), - CreatedAt: fi.ModTime(), - UpdatedAt: getATime(fi), - Labels: labels, - } -} - -// ReaderAt returns an io.ReaderAt for the blob. 
-func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - p, err := s.blobPath(desc.Digest) - if err != nil { - return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err) - } - - reader, err := OpenReader(p) - if err != nil { - return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err) - } - - return reader, nil -} - -// Delete removes a blob by its digest. -// -// While this is safe to do concurrently, safe exist-removal logic must hold -// some global lock on the store. -func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { - bp, err := s.blobPath(dgst) - if err != nil { - return fmt.Errorf("calculating blob path for delete: %w", err) - } - - if err := os.RemoveAll(bp); err != nil { - if !os.IsNotExist(err) { - return err - } - - return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound) - } - - return nil -} - -func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - if s.ls == nil { - return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition) - } - - p, err := s.blobPath(info.Digest) - if err != nil { - return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err) - } - - fi, err := os.Stat(p) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound) - } - - return content.Info{}, err - } - - var ( - all bool - labels map[string]string - ) - if len(fieldpaths) > 0 { - for _, path := range fieldpaths { - if strings.HasPrefix(path, "labels.") { - if labels == nil { - labels = map[string]string{} - } - - key := strings.TrimPrefix(path, "labels.") - labels[key] = info.Labels[key] - continue - } - - switch path { - case "labels": - all = true - labels = info.Labels - default: - return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument) - } - } - } else { - all = true - labels = info.Labels - } - - if all { - err = s.ls.Set(info.Digest, labels) - } else { - labels, err = s.ls.Update(info.Digest, labels) - } - if err != nil { - return content.Info{}, err - } - - info = s.info(info.Digest, fi, labels) - info.UpdatedAt = time.Now() - - if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { - log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) - } - - return info, nil -} - -func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { - root := filepath.Join(s.root, "blobs") - - filter, err := filters.ParseAll(fs...) - if err != nil { - return err - } - - var alg digest.Algorithm - return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - if !fi.IsDir() && !alg.Available() { - return nil - } - - // TODO(stevvooe): There are few more cases with subdirs that should be - // handled in case the layout gets corrupted. This isn't strict enough - // and may spew bad data. 
- - if path == root { - return nil - } - if filepath.Dir(path) == root { - alg = digest.Algorithm(filepath.Base(path)) - - if !alg.Available() { - alg = "" - return filepath.SkipDir - } - - // descending into a hash directory - return nil - } - - dgst := digest.NewDigestFromEncoded(alg, filepath.Base(path)) - if err := dgst.Validate(); err != nil { - // log error but don't report - log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") - // if we see this, it could mean some sort of corruption of the - // store or extra paths not expected previously. - } - - var labels map[string]string - if s.ls != nil { - labels, err = s.ls.Get(dgst) - if err != nil { - return err - } - } - - info := s.info(dgst, fi, labels) - if !filter.Match(content.AdaptInfo(info)) { - return nil - } - return fn(info) - }) -} - -func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { - return s.status(s.ingestRoot(ref)) -} - -func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { - fp, err := os.Open(filepath.Join(s.root, "ingest")) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - - defer fp.Close() - - fis, err := fp.Readdir(-1) - if err != nil { - return nil, err - } - - filter, err := filters.ParseAll(fs...) - if err != nil { - return nil, err - } - - var active []content.Status - for _, fi := range fis { - p := filepath.Join(s.root, "ingest", fi.Name()) - stat, err := s.status(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - // TODO(stevvooe): This is a common error if uploads are being - // completed while making this listing. Need to consider taking a - // lock on the whole store to coordinate this aspect. - // - // Another option is to cleanup downloads asynchronously and - // coordinate this method with the cleanup process. - // - // For now, we just skip them, as they really don't exist. - continue - } - - if filter.Match(adaptStatus(stat)) { - active = append(active, stat) - } - } - - return active, nil -} - -// WalkStatusRefs is used to walk all status references -// Failed status reads will be logged and ignored, if -// this function is called while references are being altered, -// these error messages may be produced. -func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error { - fp, err := os.Open(filepath.Join(s.root, "ingest")) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - defer fp.Close() - - fis, err := fp.Readdir(-1) - if err != nil { - return err - } - - for _, fi := range fis { - rf := filepath.Join(s.root, "ingest", fi.Name(), "ref") - - ref, err := readFileString(rf) - if err != nil { - log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref") - continue - } - - if err := fn(ref); err != nil { - return err - } - } - - return nil -} - -// status works like stat above except uses the path to the ingest. 
-func (s *store) status(ingestPath string) (content.Status, error) { - dp := filepath.Join(ingestPath, "data") - fi, err := os.Stat(dp) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) - } - return content.Status{}, err - } - - ref, err := readFileString(filepath.Join(ingestPath, "ref")) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) - } - return content.Status{}, err - } - - startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) - if err != nil { - return content.Status{}, fmt.Errorf("could not read startedat: %w", err) - } - - updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) - if err != nil { - return content.Status{}, fmt.Errorf("could not read updatedat: %w", err) - } - - // because we don't write updatedat on every write, the mod time may - // actually be more up to date. - if fi.ModTime().After(updatedAt) { - updatedAt = fi.ModTime() - } - - return content.Status{ - Ref: ref, - Offset: fi.Size(), - Total: s.total(ingestPath), - UpdatedAt: updatedAt, - StartedAt: startedAt, - }, nil -} - -func adaptStatus(status content.Status) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - switch fieldpath[0] { - case "ref": - return status.Ref, true - } - - return "", false - }) -} - -// total attempts to resolve the total expected size for the write. -func (s *store) total(ingestPath string) int64 { - totalS, err := readFileString(filepath.Join(ingestPath, "total")) - if err != nil { - return 0 - } - - total, err := strconv.ParseInt(totalS, 10, 64) - if err != nil { - // represents a corrupted file, should probably remove. - return 0 - } - - return total -} - -// Writer begins or resumes the active writer identified by ref. If the writer -// is already in use, an error is returned. Only one writer may be in use per -// ref at a time. -// -// The argument `ref` is used to uniquely identify a long-lived writer transaction. -func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - // TODO(AkihiroSuda): we could create a random string or one calculated based on the context - // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 - if wOpts.Ref == "" { - return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument) - } - var lockErr error - for count := uint64(0); count < 10; count++ { - if err := tryLock(wOpts.Ref); err != nil { - if !errdefs.IsUnavailable(err) { - return nil, err - } - - lockErr = err - } else { - lockErr = nil - break - } - time.Sleep(time.Millisecond * time.Duration(randutil.Intn(1<<count))) - } - - if lockErr != nil { - return nil, lockErr - } - - w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest) - if err != nil { - unlock(wOpts.Ref) - return nil, err - } - - return w, nil // lock is now held by w. -} - -func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) { - path, _, data := s.ingestPaths(ref) - status, err := s.status(path) - if err != nil { - return status, fmt.Errorf("failed reading status of resume write: %w", err) - } - if ref != status.Ref { - // NOTE(stevvooe): This is fairly catastrophic. Either we have some - // layout corruption or a hash collision for the ref key. - return status, fmt.Errorf("ref key does not match: %v != %v", ref, status.Ref) - } - - if total > 0 && status.Total > 0 && total != status.Total { - return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total) - } - - //nolint:dupword - // TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes - fp, err := os.Open(data) - if err != nil { - return status, err - } - - p := bufPool.Get().(*[]byte) - status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p) - bufPool.Put(p) - fp.Close() - return status, err -} - -// writer provides the main implementation of the Writer method. The caller -// must hold the lock correctly and release on error if there is a problem. 
-func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { - // TODO(stevvooe): Need to actually store expected here. We have - // code in the service that shouldn't be dealing with this. - if expected != "" { - p, err := s.blobPath(expected) - if err != nil { - return nil, fmt.Errorf("calculating expected blob path for writer: %w", err) - } - if _, err := os.Stat(p); err == nil { - return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists) - } - } - - path, refp, data := s.ingestPaths(ref) - - var ( - digester = digest.Canonical.Digester() - offset int64 - startedAt time.Time - updatedAt time.Time - ) - - foundValidIngest := false - - if err := s.ensureIngestRootOnce(); err != nil { - return nil, err - } - - // ensure that the ingest path has been created. - if err := os.Mkdir(path, 0755); err != nil { - if !os.IsExist(err) { - return nil, err - } - status, err := s.resumeStatus(ref, total, digester) - if err == nil { - foundValidIngest = true - updatedAt = status.UpdatedAt - startedAt = status.StartedAt - total = status.Total - offset = status.Offset - } else { - logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error()) - } - } - - if !foundValidIngest { - startedAt = time.Now() - updatedAt = startedAt - - // the ingest is new, we need to setup the target location. - // write the ref to a file for later use - if err := os.WriteFile(refp, []byte(ref), 0666); err != nil { - return nil, err - } - - if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil { - return nil, err - } - - if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil { - return nil, err - } - - if total > 0 { - if err := os.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { - return nil, err - } - } - } - - fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return nil, fmt.Errorf("failed to open data file: %w", err) - } - - if _, err := fp.Seek(offset, io.SeekStart); err != nil { - fp.Close() - return nil, fmt.Errorf("could not seek to current write offset: %w", err) - } - - return &writer{ - s: s, - fp: fp, - ref: ref, - path: path, - offset: offset, - total: total, - digester: digester, - startedAt: startedAt, - updatedAt: updatedAt, - }, nil -} - -// Abort an active transaction keyed by ref. If the ingest is active, it will -// be cancelled. Any resources associated with the ingest will be cleaned. -func (s *store) Abort(ctx context.Context, ref string) error { - root := s.ingestRoot(ref) - if err := os.RemoveAll(root); err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound) - } - - return err - } - - return nil -} - -func (s *store) blobPath(dgst digest.Digest) (string, error) { - if err := dgst.Validate(); err != nil { - return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument) - } - - return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Encoded()), nil -} - -func (s *store) ingestRoot(ref string) string { - // we take a digest of the ref to keep the ingest paths constant length. - // Note that this is not the current or potential digest of incoming content. - dgst := digest.FromString(ref) - return filepath.Join(s.root, "ingest", dgst.Encoded()) -} - -// ingestPaths are returned. 
The paths are the following: -// -// - root: entire ingest directory -// - ref: name of the starting ref, must be unique -// - data: file where data is written -func (s *store) ingestPaths(ref string) (string, string, string) { - var ( - fp = s.ingestRoot(ref) - rp = filepath.Join(fp, "ref") - dp = filepath.Join(fp, "data") - ) - - return fp, rp, dp -} - -func (s *store) ensureIngestRoot() error { - return os.MkdirAll(filepath.Join(s.root, "ingest"), 0777) -} - -func readFileString(path string) (string, error) { - p, err := os.ReadFile(path) - return string(p), err -} - -// readFileTimestamp reads a file with just a timestamp present. -func readFileTimestamp(p string) (time.Time, error) { - b, err := os.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) - } - return time.Time{}, err - } - - var t time.Time - if err := t.UnmarshalText(b); err != nil { - return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err) - } - - return t, nil -} - -func writeTimestampFile(p string, t time.Time) error { - b, err := t.MarshalText() - if err != nil { - return err - } - return writeToCompletion(p, b, 0666) -} - -func writeToCompletion(path string, data []byte, mode os.FileMode) error { - tmp := fmt.Sprintf("%s.tmp", path) - f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) - if err != nil { - return fmt.Errorf("create tmp file: %w", err) - } - _, err = f.Write(data) - f.Close() - if err != nil { - return fmt.Errorf("write tmp file: %w", err) - } - err = os.Rename(tmp, path) - if err != nil { - return fmt.Errorf("rename tmp file: %w", err) - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/content/local/store_bsd.go b/vendor/github.com/containerd/containerd/content/local/store_bsd.go deleted file mode 100644 index 7dcc1923270e..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store_bsd.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build darwin || freebsd || netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(st.Atimespec.Unix()) - } - - return fi.ModTime() -} diff --git a/vendor/github.com/containerd/containerd/content/local/store_openbsd.go b/vendor/github.com/containerd/containerd/content/local/store_openbsd.go deleted file mode 100644 index 45dfa9997e19..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store_openbsd.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(st.Atim.Unix()) - } - - return fi.ModTime() -} diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go deleted file mode 100644 index cb01c91c7fea..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build linux || solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(st.Atim.Unix()) - } - - return fi.ModTime() -} diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go deleted file mode 100644 index bce849979099..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - return fi.ModTime() -} diff --git a/vendor/github.com/containerd/containerd/content/local/test_helper.go b/vendor/github.com/containerd/containerd/content/local/test_helper.go deleted file mode 100644 index 42de01cae8f2..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/test_helper.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "context" - "testing" - - "github.com/containerd/containerd/content" -) - -func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) { - tmpdir := t.TempDir() - - cs, err := NewStore(tmpdir) - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithCancel(context.Background()) - return ctx, tmpdir, cs, func() { - cancel() - } -} diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go deleted file mode 100644 index 0cd8f2d04bbd..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/writer.go +++ /dev/null @@ -1,209 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "time" - - "github.com/containerd/log" - "github.com/opencontainers/go-digest" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" -) - -// writer represents a write transaction against the blob store. -type writer struct { - s *store - fp *os.File // opened data file - path string // path to writer dir - ref string // ref key - offset int64 - total int64 - digester digest.Digester - startedAt time.Time - updatedAt time.Time -} - -func (w *writer) Status() (content.Status, error) { - return content.Status{ - Ref: w.ref, - Offset: w.offset, - Total: w.total, - StartedAt: w.startedAt, - UpdatedAt: w.updatedAt, - }, nil -} - -// Digest returns the current digest of the content, up to the current write. -// -// Cannot be called concurrently with `Write`. -func (w *writer) Digest() digest.Digest { - return w.digester.Digest() -} - -// Write p to the transaction. -// -// Note that writes are unbuffered to the backing file. When writing, it is -// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer. 
-func (w *writer) Write(p []byte) (n int, err error) { - n, err = w.fp.Write(p) - w.digester.Hash().Write(p[:n]) - w.offset += int64(len(p)) - w.updatedAt = time.Now() - return n, err -} - -func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - // Ensure even on error the writer is fully closed - defer unlock(w.ref) - - var base content.Info - for _, opt := range opts { - if err := opt(&base); err != nil { - return err - } - } - - fp := w.fp - w.fp = nil - - if fp == nil { - return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition) - } - - if err := fp.Sync(); err != nil { - fp.Close() - return fmt.Errorf("sync failed: %w", err) - } - - fi, err := fp.Stat() - closeErr := fp.Close() - if err != nil { - return fmt.Errorf("stat on ingest file failed: %w", err) - } - if closeErr != nil { - return fmt.Errorf("failed to close ingest file: %w", closeErr) - } - - if size > 0 && size != fi.Size() { - return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition) - } - - dgst := w.digester.Digest() - if expected != "" && expected != dgst { - return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition) - } - - var ( - ingest = filepath.Join(w.path, "data") - target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst - ) - - // make sure parent directories of blob exist - if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { - return err - } - - if _, err := os.Stat(target); err == nil { - // collision with the target file! - if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory") - } - return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists) - } - - if err := os.Rename(ingest, target); err != nil { - return err - } - - // Ingest has now been made available in the content store, attempt to complete - // setting metadata but errors should only be logged and not returned since - // the content store cannot be cleanly rolled back. - - commitTime := time.Now() - if err := os.Chtimes(target, commitTime, commitTime); err != nil { - log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time") - } - - // clean up!! - if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory") - } - - if w.s.ls != nil && base.Labels != nil { - if err := w.s.ls.Set(dgst, base.Labels); err != nil { - log.G(ctx).WithField("digest", dgst).Error("failed to set labels") - } - } - - // change to readonly, more important for read, but provides _some_ - // protection from this point on. We use the existing perms with a mask - // only allowing reads honoring the umask on creation. - // - // This removes write and exec, only allowing read per the creation umask. - // - // NOTE: Windows does not support this operation - if runtime.GOOS != "windows" { - if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { - log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly") - } - } - - return nil -} - -// Close the writer, flushing any unwritten data and leaving the progress in -// tact. -// -// If one needs to resume the transaction, a new writer can be obtained from -// `Ingester.Writer` using the same key. The write can then be continued -// from it was left off. 
-// -// To abandon a transaction completely, first call close then `IngestManager.Abort` to -// clean up the associated resources. -func (w *writer) Close() (err error) { - if w.fp != nil { - w.fp.Sync() - err = w.fp.Close() - writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) - w.fp = nil - unlock(w.ref) - return - } - - return nil -} - -func (w *writer) Truncate(size int64) error { - if size != 0 { - return errors.New("Truncate: unsupported size") - } - w.offset = 0 - w.digester.Hash().Reset() - if _, err := w.fp.Seek(0, io.SeekStart); err != nil { - return err - } - return w.fp.Truncate(0) -} diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go deleted file mode 100644 index de22cadd4195..000000000000 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errdefs defines the common errors used throughout containerd -// packages. -// -// Use with fmt.Errorf to add context to an error. -// -// To detect an error class, use the IsXXX functions to tell whether an error -// is of a certain type. -package errdefs - -import ( - "github.com/containerd/errdefs" -) - -// Definitions of common error types used throughout containerd. All containerd -// errors returned by most packages will map into one of these errors classes. -// Packages should return errors of these types when they want to instruct a -// client to take a particular action. -// -// These errors map closely to grpc errors. 
-var ( - ErrUnknown = errdefs.ErrUnknown - ErrInvalidArgument = errdefs.ErrInvalidArgument - ErrNotFound = errdefs.ErrNotFound - ErrAlreadyExists = errdefs.ErrAlreadyExists - ErrPermissionDenied = errdefs.ErrPermissionDenied - ErrResourceExhausted = errdefs.ErrResourceExhausted - ErrFailedPrecondition = errdefs.ErrFailedPrecondition - ErrConflict = errdefs.ErrConflict - ErrNotModified = errdefs.ErrNotModified - ErrAborted = errdefs.ErrAborted - ErrOutOfRange = errdefs.ErrOutOfRange - ErrNotImplemented = errdefs.ErrNotImplemented - ErrInternal = errdefs.ErrInternal - ErrUnavailable = errdefs.ErrUnavailable - ErrDataLoss = errdefs.ErrDataLoss - ErrUnauthenticated = errdefs.ErrUnauthenticated - - IsCanceled = errdefs.IsCanceled - IsUnknown = errdefs.IsUnknown - IsInvalidArgument = errdefs.IsInvalidArgument - IsDeadlineExceeded = errdefs.IsDeadlineExceeded - IsNotFound = errdefs.IsNotFound - IsAlreadyExists = errdefs.IsAlreadyExists - IsPermissionDenied = errdefs.IsPermissionDenied - IsResourceExhausted = errdefs.IsResourceExhausted - IsFailedPrecondition = errdefs.IsFailedPrecondition - IsConflict = errdefs.IsConflict - IsNotModified = errdefs.IsNotModified - IsAborted = errdefs.IsAborted - IsOutOfRange = errdefs.IsOutOfRange - IsNotImplemented = errdefs.IsNotImplemented - IsInternal = errdefs.IsInternal - IsUnavailable = errdefs.IsUnavailable - IsDataLoss = errdefs.IsDataLoss - IsUnauthorized = errdefs.IsUnauthorized -) diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go deleted file mode 100644 index 7a9b33e05afe..000000000000 --- a/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errdefs - -import ( - "context" - "fmt" - "strings" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ToGRPC will attempt to map the backend containerd error into a grpc error, -// using the original error message as a description. -// -// Further information may be extracted from certain errors depending on their -// type. -// -// If the error is unmapped, the original error will be returned to be handled -// by the regular grpc error handling stack. 
-func ToGRPC(err error) error { - if err == nil { - return nil - } - - if isGRPCError(err) { - // error has already been mapped to grpc - return err - } - - switch { - case IsInvalidArgument(err): - return status.Errorf(codes.InvalidArgument, err.Error()) - case IsNotFound(err): - return status.Errorf(codes.NotFound, err.Error()) - case IsAlreadyExists(err): - return status.Errorf(codes.AlreadyExists, err.Error()) - case IsFailedPrecondition(err): - return status.Errorf(codes.FailedPrecondition, err.Error()) - case IsUnavailable(err): - return status.Errorf(codes.Unavailable, err.Error()) - case IsNotImplemented(err): - return status.Errorf(codes.Unimplemented, err.Error()) - case IsCanceled(err): - return status.Errorf(codes.Canceled, err.Error()) - case IsDeadlineExceeded(err): - return status.Errorf(codes.DeadlineExceeded, err.Error()) - } - - return err -} - -// ToGRPCf maps the error to grpc error codes, assembling the formatting string -// and combining it with the target error string. -// -// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)) -} - -// FromGRPC returns the underlying error from a grpc service based on the grpc error code -func FromGRPC(err error) error { - if err == nil { - return nil - } - - var cls error // divide these into error classes, becomes the cause - - switch code(err) { - case codes.InvalidArgument: - cls = ErrInvalidArgument - case codes.AlreadyExists: - cls = ErrAlreadyExists - case codes.NotFound: - cls = ErrNotFound - case codes.Unavailable: - cls = ErrUnavailable - case codes.FailedPrecondition: - cls = ErrFailedPrecondition - case codes.Unimplemented: - cls = ErrNotImplemented - case codes.Canceled: - cls = context.Canceled - case codes.DeadlineExceeded: - cls = context.DeadlineExceeded - default: - cls = ErrUnknown - } - - msg := rebaseMessage(cls, err) - if msg != "" { - err = fmt.Errorf("%s: %w", msg, cls) - } else { - err = cls - } - - return err -} - -// rebaseMessage removes the repeats for an error at the end of an error -// string. This will happen when taking an error over grpc then remapping it. -// -// Effectively, we just remove the string of cls from the end of err if it -// appears there. -func rebaseMessage(cls error, err error) string { - desc := errDesc(err) - clss := cls.Error() - if desc == clss { - return "" - } - - return strings.TrimSuffix(desc, ": "+clss) -} - -func isGRPCError(err error) bool { - _, ok := status.FromError(err) - return ok -} - -func code(err error) codes.Code { - if s, ok := status.FromError(err); ok { - return s.Code() - } - return codes.Unknown -} - -func errDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go deleted file mode 100644 index 5a9c559c1e0c..000000000000 --- a/vendor/github.com/containerd/containerd/filters/adaptor.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -// Adaptor specifies the mapping of fieldpaths to a type. For the given field -// path, the value and whether it is present should be returned. The mapping of -// the fieldpath to a field is deferred to the adaptor implementation, but -// should generally follow protobuf field path/mask semantics. -type Adaptor interface { - Field(fieldpath []string) (value string, present bool) -} - -// AdapterFunc allows implementation specific matching of fieldpaths -type AdapterFunc func(fieldpath []string) (string, bool) - -// Field returns the field name and true if it exists -func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { - return fn(fieldpath) -} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go deleted file mode 100644 index dcc569a4b7d2..000000000000 --- a/vendor/github.com/containerd/containerd/filters/filter.go +++ /dev/null @@ -1,178 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package filters defines a syntax and parser that can be used for the -// filtration of items across the containerd API. The core is built on the -// concept of protobuf field paths, with quoting. Several operators allow the -// user to flexibly select items based on field presence, equality, inequality -// and regular expressions. Flexible adaptors support working with any type. -// -// The syntax is fairly familiar, if you've used container ecosystem -// projects. At the core, we base it on the concept of protobuf field -// paths, augmenting with the ability to quote portions of the field path -// to match arbitrary labels. These "selectors" come in the following -// syntax: -// -// ``` -// <fieldpath>[<operator><value>] -// ``` -// -// A basic example is as follows: -// -// ``` -// name==foo -// ``` -// -// This would match all objects that have a field `name` with the value -// `foo`. If we only want to test if the field is present, we can omit the -// operator. This is most useful for matching labels in containerd. The -// following will match objects that have the field "labels" and have the -// label "foo" defined: -// -// ``` -// labels.foo -// ``` -// -// We also allow for quoting of parts of the field path to allow matching -// of arbitrary items: -// -// ``` -// labels."very complex label"==something -// ``` -// -// We also define `!=` and `~=` as operators. The `!=` will match all -// objects that don't match the value for a field and `~=` will compile the -// target value as a regular expression and match the field value against that. 
-// -// Selectors can be combined using a comma, such that the resulting -// selector will require all selectors are matched for the object to match. -// The following example will match objects that are named `foo` and have -// the label `bar`: -// -// ``` -// name==foo,labels.bar -// ``` -package filters - -import ( - "regexp" - - "github.com/containerd/log" -) - -// Filter matches specific resources based the provided filter -type Filter interface { - Match(adaptor Adaptor) bool -} - -// FilterFunc is a function that handles matching with an adaptor -type FilterFunc func(Adaptor) bool - -// Match matches the FilterFunc returning true if the object matches the filter -func (fn FilterFunc) Match(adaptor Adaptor) bool { - return fn(adaptor) -} - -// Always is a filter that always returns true for any type of object -var Always FilterFunc = func(adaptor Adaptor) bool { - return true -} - -// Any allows multiple filters to be matched against the object -type Any []Filter - -// Match returns true if any of the provided filters are true -func (m Any) Match(adaptor Adaptor) bool { - for _, m := range m { - if m.Match(adaptor) { - return true - } - } - - return false -} - -// All allows multiple filters to be matched against the object -type All []Filter - -// Match only returns true if all filters match the object -func (m All) Match(adaptor Adaptor) bool { - for _, m := range m { - if !m.Match(adaptor) { - return false - } - } - - return true -} - -type operator int - -const ( - operatorPresent = iota - operatorEqual - operatorNotEqual - operatorMatches -) - -func (op operator) String() string { - switch op { - case operatorPresent: - return "?" - case operatorEqual: - return "==" - case operatorNotEqual: - return "!=" - case operatorMatches: - return "~=" - } - - return "unknown" -} - -type selector struct { - fieldpath []string - operator operator - value string - re *regexp.Regexp -} - -func (m selector) Match(adaptor Adaptor) bool { - value, present := adaptor.Field(m.fieldpath) - - switch m.operator { - case operatorPresent: - return present - case operatorEqual: - return present && value == m.value - case operatorNotEqual: - return value != m.value - case operatorMatches: - if m.re == nil { - r, err := regexp.Compile(m.value) - if err != nil { - log.L.Errorf("error compiling regexp %q", m.value) - return false - } - - m.re = r - } - - return m.re.MatchString(value) - default: - return false - } -} diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go deleted file mode 100644 index 32767909b1c9..000000000000 --- a/vendor/github.com/containerd/containerd/filters/parser.go +++ /dev/null @@ -1,290 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -import ( - "fmt" - "io" - - "github.com/containerd/containerd/errdefs" -) - -/* -Parse the strings into a filter that may be used with an adaptor. - -The filter is made up of zero or more selectors. 
- -The format is a comma separated list of expressions, in the form of -`<fieldpath><operator><value>`, known as selectors. All selectors must match the -target object for the filter to be true. - -We define the operators "==" for equality, "!=" for not equal and "~=" for a -regular expression. If the operator and value are not present, the matcher will -test for the presence of a value, as defined by the target object. - -The formal grammar is as follows: - -selectors := selector ("," selector)* -selector := fieldpath (operator value) -fieldpath := field ('.' field)* -field := quoted | [A-Za-z] [A-Za-z0-9_]+ -operator := "==" | "!=" | "~=" -value := quoted | [^\s,]+ -quoted := <go string syntax> -*/ -func Parse(s string) (Filter, error) { - // special case empty to match all - if s == "" { - return Always, nil - } - - p := parser{input: s} - return p.parse() -} - -// ParseAll parses each filter in ss and returns a filter that will return true -// if any filter matches the expression. -// -// If no filters are provided, the filter will match anything. -func ParseAll(ss ...string) (Filter, error) { - if len(ss) == 0 { - return Always, nil - } - - var fs []Filter - for _, s := range ss { - f, err := Parse(s) - if err != nil { - return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) - } - - fs = append(fs, f) - } - - return Any(fs), nil -} - -type parser struct { - input string - scanner scanner -} - -func (p *parser) parse() (Filter, error) { - p.scanner.init(p.input) - - ss, err := p.selectors() - if err != nil { - return nil, fmt.Errorf("filters: %w", err) - } - - return ss, nil -} - -func (p *parser) selectors() (Filter, error) { - s, err := p.selector() - if err != nil { - return nil, err - } - - ss := All{s} - -loop: - for { - tok := p.scanner.peek() - switch tok { - case ',': - pos, tok, _ := p.scanner.scan() - if tok != tokenSeparator { - return nil, p.mkerr(pos, "expected a separator") - } - - s, err := p.selector() - if err != nil { - return nil, err - } - - ss = append(ss, s) - case tokenEOF: - break loop - default: - return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok)) - } - } - - return ss, nil -} - -func (p *parser) selector() (selector, error) { - fieldpath, err := p.fieldpath() - if err != nil { - return selector{}, err - } - - switch p.scanner.peek() { - case ',', tokenSeparator, tokenEOF: - return selector{ - fieldpath: fieldpath, - operator: operatorPresent, - }, nil - } - - op, err := p.operator() - if err != nil { - return selector{}, err - } - - var allowAltQuotes bool - if op == operatorMatches { - allowAltQuotes = true - } - - value, err := p.value(allowAltQuotes) - if err != nil { - if err == io.EOF { - return selector{}, io.ErrUnexpectedEOF - } - return selector{}, err - } - - return selector{ - fieldpath: fieldpath, - value: value, - operator: op, - }, nil -} - -func (p *parser) fieldpath() ([]string, error) { - f, err := p.field() - if err != nil { - return nil, err - } - - fs := []string{f} -loop: - for { - tok := p.scanner.peek() // lookahead to consume field separator - - switch tok { - case '.': - pos, tok, _ := p.scanner.scan() // consume separator - if tok != tokenSeparator { - return nil, p.mkerr(pos, "expected a field separator (`.`)") - } - - f, err := p.field() - if err != nil { - return nil, err - } - - fs = append(fs, f) - default: - // let the layer above handle the other bad cases. 
- break loop - } - } - - return fs, nil -} - -func (p *parser) field() (string, error) { - pos, tok, s := p.scanner.scan() - switch tok { - case tokenField: - return s, nil - case tokenQuoted: - return p.unquote(pos, s, false) - case tokenIllegal: - return "", p.mkerr(pos, p.scanner.err) - } - - return "", p.mkerr(pos, "expected field or quoted") -} - -func (p *parser) operator() (operator, error) { - pos, tok, s := p.scanner.scan() - switch tok { - case tokenOperator: - switch s { - case "==": - return operatorEqual, nil - case "!=": - return operatorNotEqual, nil - case "~=": - return operatorMatches, nil - default: - return 0, p.mkerr(pos, "unsupported operator %q", s) - } - case tokenIllegal: - return 0, p.mkerr(pos, p.scanner.err) - } - - return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) -} - -func (p *parser) value(allowAltQuotes bool) (string, error) { - pos, tok, s := p.scanner.scan() - - switch tok { - case tokenValue, tokenField: - return s, nil - case tokenQuoted: - return p.unquote(pos, s, allowAltQuotes) - case tokenIllegal: - return "", p.mkerr(pos, p.scanner.err) - } - - return "", p.mkerr(pos, "expected value or quoted") -} - -func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { - if !allowAlts && s[0] != '\'' && s[0] != '"' { - return "", p.mkerr(pos, "invalid quote encountered") - } - - uq, err := unquote(s) - if err != nil { - return "", p.mkerr(pos, "unquoting failed: %v", err) - } - - return uq, nil -} - -type parseError struct { - input string - pos int - msg string -} - -func (pe parseError) Error() string { - if pe.pos < len(pe.input) { - before := pe.input[:pe.pos] - location := pe.input[pe.pos : pe.pos+1] // need to handle end - after := pe.input[pe.pos+1:] - - return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) - } - - return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) -} - -func (p *parser) mkerr(pos int, format string, args ...interface{}) error { - return fmt.Errorf("parse error: %w", parseError{ - input: p.input, - pos: pos, - msg: fmt.Sprintf(format, args...), - }) -} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go deleted file mode 100644 index 5c800ef846cc..000000000000 --- a/vendor/github.com/containerd/containerd/filters/quote.go +++ /dev/null @@ -1,252 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -import ( - "errors" - "unicode/utf8" -) - -// NOTE(stevvooe): Most of this code in this file is copied from the stdlib -// strconv package and modified to be able to handle quoting with `/` and `|` -// as delimiters. The copyright is held by the Go authors. - -var errQuoteSyntax = errors.New("quote syntax error") - -// UnquoteChar decodes the first character or byte in the escaped string -// or character literal represented by the string s. -// It returns four values: -// -// 1. value, the decoded Unicode code point or byte value; -// 2. 
multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; -// 3. tail, the remainder of the string after the character; and -// 4. an error that will be nil if the character is syntactically valid. -// -// The second argument, quote, specifies the type of literal being parsed -// and therefore which escaped quote character is permitted. -// If set to a single quote, it permits the sequence \' and disallows unescaped '. -// If set to a double quote, it permits \" and disallows unescaped ". -// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. -// -// This is from Go strconv package, modified to support `|` and `/` as double -// quotes for use with regular expressions. -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): - err = errQuoteSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = errQuoteSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = errQuoteSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = errQuoteSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = errQuoteSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = errQuoteSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = errQuoteSyntax - return - } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = errQuoteSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"', '|', '/': - if c != quote { - err = errQuoteSyntax - return - } - value = rune(c) - default: - err = errQuoteSyntax - return - } - tail = s - return -} - -// unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -// -// This is modified from the standard library to support `|` and `/` as quote -// characters for use with regular expressions. -func unquote(s string) (string, error) { - n := len(s) - if n < 2 { - return "", errQuoteSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", errQuoteSyntax - } - s = s[1 : n-1] - - if quote == '`' { - if contains(s, '`') { - return "", errQuoteSyntax - } - if contains(s, '\r') { - // -1 because we know there is at least one \r to remove. 
- buf := make([]byte, 0, len(s)-1) - for i := 0; i < len(s); i++ { - if s[i] != '\r' { - buf = append(buf, s[i]) - } - } - return string(buf), nil - } - return s, nil - } - if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { - return "", errQuoteSyntax - } - if contains(s, '\n') { - return "", errQuoteSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) { - switch quote { - case '"', '/', '|': // pipe and slash are treated like double quote - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", errQuoteSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go deleted file mode 100644 index 6a485467b8a0..000000000000 --- a/vendor/github.com/containerd/containerd/filters/scanner.go +++ /dev/null @@ -1,297 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package filters - -import ( - "unicode" - "unicode/utf8" -) - -const ( - tokenEOF = -(iota + 1) - tokenQuoted - tokenValue - tokenField - tokenSeparator - tokenOperator - tokenIllegal -) - -type token rune - -func (t token) String() string { - switch t { - case tokenEOF: - return "EOF" - case tokenQuoted: - return "Quoted" - case tokenValue: - return "Value" - case tokenField: - return "Field" - case tokenSeparator: - return "Separator" - case tokenOperator: - return "Operator" - case tokenIllegal: - return "Illegal" - } - - return string(t) -} - -func (t token) GoString() string { - return "token" + t.String() -} - -type scanner struct { - input string - pos int - ppos int // bounds the current rune in the string - value bool - err string -} - -func (s *scanner) init(input string) { - s.input = input - s.pos = 0 - s.ppos = 0 -} - -func (s *scanner) next() rune { - if s.pos >= len(s.input) { - return tokenEOF - } - s.pos = s.ppos - - r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) - s.ppos += w - if r == utf8.RuneError { - if w > 0 { - s.error("rune error") - return tokenIllegal - } - return tokenEOF - } - - if r == 0 { - s.error("unexpected null") - return tokenIllegal - } - - return r -} - -func (s *scanner) peek() rune { - pos := s.pos - ppos := s.ppos - ch := s.next() - s.pos = pos - s.ppos = ppos - return ch -} - -func (s *scanner) scan() (nextp int, tk token, text string) { - var ( - ch = s.next() - pos = s.pos - ) - -chomp: - switch { - case ch == tokenEOF: - case ch == tokenIllegal: - case isQuoteRune(ch): - if !s.scanQuoted(ch) { - return pos, tokenIllegal, s.input[pos:s.ppos] - } - return pos, tokenQuoted, s.input[pos:s.ppos] - case isSeparatorRune(ch): - s.value = false - return pos, tokenSeparator, s.input[pos:s.ppos] - case isOperatorRune(ch): - s.scanOperator() - s.value = true - return pos, tokenOperator, s.input[pos:s.ppos] - case unicode.IsSpace(ch): - // chomp - ch = s.next() - pos = s.pos - goto chomp - case s.value: - s.scanValue() - s.value = false - return pos, tokenValue, s.input[pos:s.ppos] - case isFieldRune(ch): - s.scanField() - return pos, tokenField, s.input[pos:s.ppos] - } - - return s.pos, token(ch), "" -} - -func (s *scanner) scanField() { - for { - ch := s.peek() - if !isFieldRune(ch) { - break - } - s.next() - } -} - -func (s *scanner) scanOperator() { - for { - ch := s.peek() - switch ch { - case '=', '!', '~': - s.next() - default: - return - } - } -} - -func (s *scanner) scanValue() { - for { - ch := s.peek() - if !isValueRune(ch) { - break - } - s.next() - } -} - -func (s *scanner) scanQuoted(quote rune) bool { - var illegal bool - ch := s.next() // read character after quote - for ch != quote { - if ch == '\n' || ch < 0 { - s.error("quoted literal not terminated") - return false - } - if ch == '\\' { - var legal bool - ch, legal = s.scanEscape(quote) - if !legal { - illegal = true - } - } else { - ch = s.next() - } - } - return !illegal -} - -func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { - ch = s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: - // nothing to do - ch = s.next() - legal = true - case '0', '1', '2', '3', '4', '5', '6', '7': - ch, legal = s.scanDigits(ch, 8, 3) - case 'x': - ch, legal = s.scanDigits(s.next(), 16, 2) - case 'u': - ch, legal = s.scanDigits(s.next(), 16, 4) - case 'U': - ch, legal = s.scanDigits(s.next(), 16, 8) - default: - s.error("illegal escape sequence") - } - return -} - -func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.error("illegal numeric escape sequence") - return ch, false - } - return ch, true -} - -func (s *scanner) error(msg string) { - if s.err == "" { - s.err = msg - } -} - -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} - -func isFieldRune(r rune) bool { - return (r == '_' || isAlphaRune(r) || isDigitRune(r)) -} - -func isAlphaRune(r rune) bool { - return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' -} - -func isDigitRune(r rune) bool { - return r >= '0' && r <= '9' -} - -func isOperatorRune(r rune) bool { - switch r { - case '=', '!', '~': - return true - } - - return false -} - -func isQuoteRune(r rune) bool { - switch r { - case '/', '|', '"': // maybe add single quoting? - return true - } - - return false -} - -func isSeparatorRune(r rune) bool { - switch r { - case ',', '.': - return true - } - - return false -} - -func isValueRune(r rune) bool { - return r != ',' && !unicode.IsSpace(r) && - (unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsNumber(r) || - unicode.IsGraphic(r) || - unicode.IsPunct(r)) -} diff --git a/vendor/github.com/containerd/containerd/images/annotations.go b/vendor/github.com/containerd/containerd/images/annotations.go deleted file mode 100644 index 47d92104cddc..000000000000 --- a/vendor/github.com/containerd/containerd/images/annotations.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -const ( - // AnnotationImageName is an annotation on a Descriptor in an index.json - // containing the `Name` value as used by an `Image` struct - AnnotationImageName = "io.containerd.image.name" -) diff --git a/vendor/github.com/containerd/containerd/images/converter/converter.go b/vendor/github.com/containerd/containerd/images/converter/converter.go deleted file mode 100644 index 4cacf90ba227..000000000000 --- a/vendor/github.com/containerd/containerd/images/converter/converter.go +++ /dev/null @@ -1,126 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -// Package converter provides image converter -package converter - -import ( - "context" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/leases" - "github.com/containerd/platforms" -) - -type convertOpts struct { - layerConvertFunc ConvertFunc - docker2oci bool - indexConvertFunc ConvertFunc - platformMC platforms.MatchComparer -} - -// Opt is an option for Convert() -type Opt func(*convertOpts) error - -// WithLayerConvertFunc specifies the function that converts layers. -func WithLayerConvertFunc(fn ConvertFunc) Opt { - return func(copts *convertOpts) error { - copts.layerConvertFunc = fn - return nil - } -} - -// WithDockerToOCI converts Docker media types into OCI ones. -func WithDockerToOCI(v bool) Opt { - return func(copts *convertOpts) error { - copts.docker2oci = true - return nil - } -} - -// WithPlatform specifies the platform. -// Defaults to all platforms. -func WithPlatform(p platforms.MatchComparer) Opt { - return func(copts *convertOpts) error { - copts.platformMC = p - return nil - } -} - -// WithIndexConvertFunc specifies the function that converts manifests and index (manifest lists). -// Defaults to DefaultIndexConvertFunc. -func WithIndexConvertFunc(fn ConvertFunc) Opt { - return func(copts *convertOpts) error { - copts.indexConvertFunc = fn - return nil - } -} - -// Client is implemented by *containerd.Client . -type Client interface { - WithLease(ctx context.Context, opts ...leases.Opt) (context.Context, func(context.Context) error, error) - ContentStore() content.Store - ImageService() images.Store -} - -// Convert converts an image. -func Convert(ctx context.Context, client Client, dstRef, srcRef string, opts ...Opt) (*images.Image, error) { - var copts convertOpts - for _, o := range opts { - if err := o(&copts); err != nil { - return nil, err - } - } - if copts.platformMC == nil { - copts.platformMC = platforms.All - } - if copts.indexConvertFunc == nil { - copts.indexConvertFunc = DefaultIndexConvertFunc(copts.layerConvertFunc, copts.docker2oci, copts.platformMC) - } - - ctx, done, err := client.WithLease(ctx) - if err != nil { - return nil, err - } - defer done(ctx) - - cs := client.ContentStore() - is := client.ImageService() - srcImg, err := is.Get(ctx, srcRef) - if err != nil { - return nil, err - } - - dstDesc, err := copts.indexConvertFunc(ctx, cs, srcImg.Target) - if err != nil { - return nil, err - } - - dstImg := srcImg - dstImg.Name = dstRef - if dstDesc != nil { - dstImg.Target = *dstDesc - } - var res images.Image - if dstRef != srcRef { - _ = is.Delete(ctx, dstRef) - res, err = is.Create(ctx, dstImg) - } else { - res, err = is.Update(ctx, dstImg) - } - return &res, err -} diff --git a/vendor/github.com/containerd/containerd/images/converter/default.go b/vendor/github.com/containerd/containerd/images/converter/default.go deleted file mode 100644 index 746bdd1ab171..000000000000 --- a/vendor/github.com/containerd/containerd/images/converter/default.go +++ /dev/null @@ -1,454 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package converter - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "strings" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/platforms" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" -) - -// ConvertFunc returns a converted content descriptor. -// When the content was not converted, ConvertFunc returns nil. -type ConvertFunc func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) - -// DefaultIndexConvertFunc is the default convert func used by Convert. -func DefaultIndexConvertFunc(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer) ConvertFunc { - c := &defaultConverter{ - layerConvertFunc: layerConvertFunc, - docker2oci: docker2oci, - platformMC: platformMC, - diffIDMap: make(map[digest.Digest]digest.Digest), - } - return c.convert -} - -// ConvertHookFunc is a callback function called during conversion of a blob. -// orgDesc is the target descriptor to convert. newDesc is passed if conversion happens. -type ConvertHookFunc func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) - -// ConvertHooks is a configuration for hook callbacks called during blob conversion. -type ConvertHooks struct { - // PostConvertHook is a callback function called for each blob after conversion is done. - PostConvertHook ConvertHookFunc -} - -// IndexConvertFuncWithHook is the convert func used by Convert with hook functions support. -func IndexConvertFuncWithHook(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer, hooks ConvertHooks) ConvertFunc { - c := &defaultConverter{ - layerConvertFunc: layerConvertFunc, - docker2oci: docker2oci, - platformMC: platformMC, - diffIDMap: make(map[digest.Digest]digest.Digest), - hooks: hooks, - } - return c.convert -} - -type defaultConverter struct { - layerConvertFunc ConvertFunc - docker2oci bool - platformMC platforms.MatchComparer - diffIDMap map[digest.Digest]digest.Digest // key: old diffID, value: new diffID - diffIDMapMu sync.RWMutex - hooks ConvertHooks -} - -// convert dispatches desc.MediaType and calls c.convert{Layer,Manifest,Index,Config}. -// -// Also converts media type if c.docker2oci is set. 
-func (c *defaultConverter) convert(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { - var ( - newDesc *ocispec.Descriptor - err error - ) - if images.IsLayerType(desc.MediaType) { - newDesc, err = c.convertLayer(ctx, cs, desc) - } else if images.IsManifestType(desc.MediaType) { - newDesc, err = c.convertManifest(ctx, cs, desc) - } else if images.IsIndexType(desc.MediaType) { - newDesc, err = c.convertIndex(ctx, cs, desc) - } else if images.IsConfigType(desc.MediaType) { - newDesc, err = c.convertConfig(ctx, cs, desc) - } - if err != nil { - return nil, err - } - - if c.hooks.PostConvertHook != nil { - if newDescPost, err := c.hooks.PostConvertHook(ctx, cs, desc, newDesc); err != nil { - return nil, err - } else if newDescPost != nil { - newDesc = newDescPost - } - } - - if images.IsDockerType(desc.MediaType) { - if c.docker2oci { - if newDesc == nil { - newDesc = copyDesc(desc) - } - newDesc.MediaType = ConvertDockerMediaTypeToOCI(newDesc.MediaType) - } else if (newDesc == nil && len(desc.Annotations) != 0) || (newDesc != nil && len(newDesc.Annotations) != 0) { - // Annotations is supported only on OCI manifest. - // We need to remove annotations for Docker media types. - if newDesc == nil { - newDesc = copyDesc(desc) - } - newDesc.Annotations = nil - } - } - logrus.WithField("old", desc).WithField("new", newDesc).Debugf("converted") - return newDesc, nil -} - -func copyDesc(desc ocispec.Descriptor) *ocispec.Descriptor { - descCopy := desc - return &descCopy -} - -// convertLayer converts image layers if c.layerConvertFunc is set. -// -// c.layerConvertFunc can be nil, e.g., for converting Docker media types to OCI ones. -func (c *defaultConverter) convertLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { - if c.layerConvertFunc != nil { - return c.layerConvertFunc(ctx, cs, desc) - } - return nil, nil -} - -// convertManifest converts image manifests. -// -// - converts `.mediaType` if the target format is OCI -// - records diff ID changes in c.diffIDMap -func (c *defaultConverter) convertManifest(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { - var ( - manifest ocispec.Manifest - modified bool - ) - labels, err := readJSON(ctx, cs, &manifest, desc) - if err != nil { - return nil, err - } - if labels == nil { - labels = make(map[string]string) - } - if images.IsDockerType(manifest.MediaType) && c.docker2oci { - manifest.MediaType = ConvertDockerMediaTypeToOCI(manifest.MediaType) - modified = true - } - var mu sync.Mutex - eg, ctx2 := errgroup.WithContext(ctx) - for i, l := range manifest.Layers { - i := i - l := l - oldDiffID, err := images.GetDiffID(ctx, cs, l) - if err != nil { - return nil, err - } - eg.Go(func() error { - newL, err := c.convert(ctx2, cs, l) - if err != nil { - return err - } - if newL != nil { - mu.Lock() - // update GC labels - ClearGCLabels(labels, l.Digest) - labelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", i) - labels[labelKey] = newL.Digest.String() - manifest.Layers[i] = *newL - modified = true - mu.Unlock() - - // diffID changes if the tar entries were modified. - // diffID stays same if only the compression type was changed. - // When diffID changed, add a map entry so that we can update image config. 
- newDiffID, err := images.GetDiffID(ctx, cs, *newL) - if err != nil { - return err - } - if newDiffID != oldDiffID { - c.diffIDMapMu.Lock() - c.diffIDMap[oldDiffID] = newDiffID - c.diffIDMapMu.Unlock() - } - } - return nil - }) - } - if err := eg.Wait(); err != nil { - return nil, err - } - - newConfig, err := c.convert(ctx, cs, manifest.Config) - if err != nil { - return nil, err - } - if newConfig != nil { - ClearGCLabels(labels, manifest.Config.Digest) - labels["containerd.io/gc.ref.content.config"] = newConfig.Digest.String() - manifest.Config = *newConfig - modified = true - } - - if modified { - return writeJSON(ctx, cs, &manifest, desc, labels) - } - return nil, nil -} - -// convertIndex converts image index. -// -// - converts `.mediaType` if the target format is OCI -// - clears manifest entries that do not match c.platformMC -func (c *defaultConverter) convertIndex(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { - var ( - index ocispec.Index - modified bool - ) - labels, err := readJSON(ctx, cs, &index, desc) - if err != nil { - return nil, err - } - if labels == nil { - labels = make(map[string]string) - } - if images.IsDockerType(index.MediaType) && c.docker2oci { - index.MediaType = ConvertDockerMediaTypeToOCI(index.MediaType) - modified = true - } - - newManifests := make([]ocispec.Descriptor, len(index.Manifests)) - newManifestsToBeRemoved := make(map[int]struct{}) // slice index - var mu sync.Mutex - eg, ctx2 := errgroup.WithContext(ctx) - for i, mani := range index.Manifests { - i := i - mani := mani - labelKey := fmt.Sprintf("containerd.io/gc.ref.content.m.%d", i) - eg.Go(func() error { - if mani.Platform != nil && !c.platformMC.Match(*mani.Platform) { - mu.Lock() - ClearGCLabels(labels, mani.Digest) - newManifestsToBeRemoved[i] = struct{}{} - modified = true - mu.Unlock() - return nil - } - newMani, err := c.convert(ctx2, cs, mani) - if err != nil { - return err - } - mu.Lock() - if newMani != nil { - ClearGCLabels(labels, mani.Digest) - labels[labelKey] = newMani.Digest.String() - // NOTE: for keeping manifest order, we specify `i` index explicitly - newManifests[i] = *newMani - modified = true - } else { - newManifests[i] = mani - } - mu.Unlock() - return nil - }) - } - if err := eg.Wait(); err != nil { - return nil, err - } - if modified { - var newManifestsClean []ocispec.Descriptor - for i, m := range newManifests { - if _, ok := newManifestsToBeRemoved[i]; !ok { - newManifestsClean = append(newManifestsClean, m) - } - } - index.Manifests = newManifestsClean - return writeJSON(ctx, cs, &index, desc, labels) - } - return nil, nil -} - -// convertConfig converts image config contents. -// -// - updates `.rootfs.diff_ids` using c.diffIDMap . -// -// - clears legacy `.config.Image` and `.container_config.Image` fields if `.rootfs.diff_ids` was updated. 
-func (c *defaultConverter) convertConfig(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { - var ( - cfg DualConfig - cfgAsOCI ocispec.Image // read only, used for parsing cfg - modified bool - ) - - labels, err := readJSON(ctx, cs, &cfg, desc) - if err != nil { - return nil, err - } - if labels == nil { - labels = make(map[string]string) - } - if _, err := readJSON(ctx, cs, &cfgAsOCI, desc); err != nil { - return nil, err - } - - if rootfs := cfgAsOCI.RootFS; rootfs.Type == "layers" { - rootfsModified := false - c.diffIDMapMu.RLock() - for i, oldDiffID := range rootfs.DiffIDs { - if newDiffID, ok := c.diffIDMap[oldDiffID]; ok && newDiffID != oldDiffID { - rootfs.DiffIDs[i] = newDiffID - rootfsModified = true - } - } - c.diffIDMapMu.RUnlock() - if rootfsModified { - rootfsB, err := json.Marshal(rootfs) - if err != nil { - return nil, err - } - cfg["rootfs"] = (*json.RawMessage)(&rootfsB) - modified = true - } - } - - if modified { - // cfg may have dummy value for legacy `.config.Image` and `.container_config.Image` - // We should clear the ID if we changed the diff IDs. - if _, err := clearDockerV1DummyID(cfg); err != nil { - return nil, err - } - return writeJSON(ctx, cs, &cfg, desc, labels) - } - return nil, nil -} - -// clearDockerV1DummyID clears the dummy values for legacy `.config.Image` and `.container_config.Image`. -// Returns true if the cfg was modified. -func clearDockerV1DummyID(cfg DualConfig) (bool, error) { - var modified bool - f := func(k string) error { - if configX, ok := cfg[k]; ok && configX != nil { - var configField map[string]*json.RawMessage - if err := json.Unmarshal(*configX, &configField); err != nil { - return err - } - delete(configField, "Image") - b, err := json.Marshal(configField) - if err != nil { - return err - } - cfg[k] = (*json.RawMessage)(&b) - modified = true - } - return nil - } - if err := f("config"); err != nil { - return modified, err - } - if err := f("container_config"); err != nil { - return modified, err - } - return modified, nil -} - -// DualConfig covers Docker config (v1.0, v1.1, v1.2) and OCI config. -// Unmarshalled as map[string]*json.RawMessage to retain unknown fields on remarshalling. 
-type DualConfig map[string]*json.RawMessage - -func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec.Descriptor) (map[string]string, error) { - info, err := cs.Info(ctx, desc.Digest) - if err != nil { - return nil, err - } - labels := info.Labels - b, err := content.ReadBlob(ctx, cs, desc) - if err != nil { - return nil, err - } - if err := json.Unmarshal(b, x); err != nil { - return nil, err - } - return labels, nil -} - -func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc ocispec.Descriptor, labels map[string]string) (*ocispec.Descriptor, error) { - b, err := json.Marshal(x) - if err != nil { - return nil, err - } - dgst := digest.SHA256.FromBytes(b) - ref := fmt.Sprintf("converter-write-json-%s", dgst.String()) - w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) - if err != nil { - return nil, err - } - if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil { - w.Close() - return nil, err - } - if err := w.Close(); err != nil { - return nil, err - } - newDesc := oldDesc - newDesc.Size = int64(len(b)) - newDesc.Digest = dgst - return &newDesc, nil -} - -// ConvertDockerMediaTypeToOCI converts a media type string -func ConvertDockerMediaTypeToOCI(mt string) string { - switch mt { - case images.MediaTypeDockerSchema2ManifestList: - return ocispec.MediaTypeImageIndex - case images.MediaTypeDockerSchema2Manifest: - return ocispec.MediaTypeImageManifest - case images.MediaTypeDockerSchema2LayerGzip: - return ocispec.MediaTypeImageLayerGzip - case images.MediaTypeDockerSchema2LayerForeignGzip: - return ocispec.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // deprecated - case images.MediaTypeDockerSchema2Layer: - return ocispec.MediaTypeImageLayer - case images.MediaTypeDockerSchema2LayerForeign: - return ocispec.MediaTypeImageLayerNonDistributable //nolint:staticcheck // deprecated - case images.MediaTypeDockerSchema2Config: - return ocispec.MediaTypeImageConfig - default: - return mt - } -} - -// ClearGCLabels clears GC labels for the given digest. -func ClearGCLabels(labels map[string]string, dgst digest.Digest) { - for k, v := range labels { - if v == dgst.String() && strings.HasPrefix(k, "containerd.io/gc.ref.content") { - delete(labels, k) - } - } -} diff --git a/vendor/github.com/containerd/containerd/images/diffid.go b/vendor/github.com/containerd/containerd/images/diffid.go deleted file mode 100644 index 85577eedee4a..000000000000 --- a/vendor/github.com/containerd/containerd/images/diffid.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package images - -import ( - "context" - "io" - - "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/labels" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" -) - -// GetDiffID gets the diff ID of the layer blob descriptor. -func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) { - switch desc.MediaType { - case - // If the layer is already uncompressed, we can just return its digest - MediaTypeDockerSchema2Layer, - ocispec.MediaTypeImageLayer, - MediaTypeDockerSchema2LayerForeign, - ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // deprecated - return desc.Digest, nil - } - info, err := cs.Info(ctx, desc.Digest) - if err != nil { - return "", err - } - v, ok := info.Labels[labels.LabelUncompressed] - if ok { - // Fast path: if the image is already unpacked, we can use the label value - return digest.Parse(v) - } - // if the image is not unpacked, we may not have the label - ra, err := cs.ReaderAt(ctx, desc) - if err != nil { - return "", err - } - defer ra.Close() - r := content.NewReader(ra) - uR, err := compression.DecompressStream(r) - if err != nil { - return "", err - } - defer uR.Close() - digester := digest.Canonical.Digester() - hashW := digester.Hash() - if _, err := io.Copy(hashW, uR); err != nil { - return "", err - } - if err := ra.Close(); err != nil { - return "", err - } - digest := digester.Digest() - // memorize the computed value - if info.Labels == nil { - info.Labels = make(map[string]string) - } - info.Labels[labels.LabelUncompressed] = digest.String() - if _, err := cs.Update(ctx, info, "labels"); err != nil { - logrus.WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest) - } - return digest, nil -} diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go deleted file mode 100644 index a685092e2cb2..000000000000 --- a/vendor/github.com/containerd/containerd/images/handlers.go +++ /dev/null @@ -1,323 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "errors" - "fmt" - "sort" - - "github.com/containerd/platforms" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" -) - -var ( - // ErrSkipDesc is used to skip processing of a descriptor and - // its descendants. - ErrSkipDesc = errors.New("skip descriptor") - - // ErrStopHandler is used to signify that the descriptor - // has been handled and should not be handled further. - // This applies only to a single descriptor in a handler - // chain and does not apply to descendant descriptors. 
- ErrStopHandler = errors.New("stop handler") - - // ErrEmptyWalk is used when the WalkNotEmpty handlers return no - // children (e.g.: they were filtered out). - ErrEmptyWalk = errors.New("image might be filtered out") -) - -// Handler handles image manifests -type Handler interface { - Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) -} - -// HandlerFunc function implementing the Handler interface -type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) - -// Handle image manifests -func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - return fn(ctx, desc) -} - -// Handlers returns a handler that will run the handlers in sequence. -// -// A handler may return `ErrStopHandler` to stop calling additional handlers -func Handlers(handlers ...Handler) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - var children []ocispec.Descriptor - for _, handler := range handlers { - ch, err := handler.Handle(ctx, desc) - if err != nil { - if errors.Is(err, ErrStopHandler) { - break - } - return nil, err - } - - children = append(children, ch...) - } - - return children, nil - } -} - -// Walk the resources of an image and call the handler for each. If the handler -// decodes the sub-resources for each image, -// -// This differs from dispatch in that each sibling resource is considered -// synchronously. -func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { - for _, desc := range descs { - - children, err := handler.Handle(ctx, desc) - if err != nil { - if errors.Is(err, ErrSkipDesc) { - continue // don't traverse the children. - } - return err - } - - if len(children) > 0 { - if err := Walk(ctx, handler, children...); err != nil { - return err - } - } - } - return nil -} - -// WalkNotEmpty works the same way Walk does, with the exception that it ensures that -// some children are still found by Walking the descriptors (for example, not all of -// them have been filtered out by one of the handlers). If there are no children, -// then an ErrEmptyWalk error is returned. -func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { - isEmpty := true - var notEmptyHandler HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := handler.Handle(ctx, desc) - if err != nil { - return children, err - } - - if len(children) > 0 { - isEmpty = false - } - - return children, nil - } - - err := Walk(ctx, notEmptyHandler, descs...) - if err != nil { - return err - } - - if isEmpty { - return ErrEmptyWalk - } - - return nil -} - -// Dispatch runs the provided handler for content specified by the descriptors. -// If the handler decode subresources, they will be visited, as well. -// -// Handlers for siblings are run in parallel on the provided descriptors. A -// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse -// any children. -// -// A concurrency limiter can be passed in to limit the number of concurrent -// handlers running. When limiter is nil, there is no limit. -// -// Typically, this function will be used with `FetchHandler`, often composed -// with other handlers. -// -// If any handler returns an error, the dispatch session will be canceled. 
-func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { - eg, ctx2 := errgroup.WithContext(ctx) - for _, desc := range descs { - desc := desc - - if limiter != nil { - if err := limiter.Acquire(ctx, 1); err != nil { - return err - } - } - - eg.Go(func() error { - desc := desc - - children, err := handler.Handle(ctx2, desc) - if limiter != nil { - limiter.Release(1) - } - if err != nil { - if errors.Is(err, ErrSkipDesc) { - return nil // don't traverse the children. - } - return err - } - - if len(children) > 0 { - return Dispatch(ctx2, handler, limiter, children...) - } - - return nil - }) - } - - return eg.Wait() -} - -// ChildrenHandler decodes well-known manifest types and returns their children. -// -// This is useful for supporting recursive fetch and other use cases where you -// want to do a full walk of resources. -// -// One can also replace this with another implementation to allow descending of -// arbitrary types. -func ChildrenHandler(provider content.Provider) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - return Children(ctx, provider, desc) - } -} - -// SetChildrenLabels is a handler wrapper which sets labels for the content on -// the children returned by the handler and passes through the children. -// Must follow a handler that returns the children to be labeled. -func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { - return SetChildrenMappedLabels(manager, f, nil) -} - -// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on -// the children returned by the handler and passes through the children. -// Must follow a handler that returns the children to be labeled. -// The label map allows the caller to control the labels per child descriptor. -// For returned labels, the index of the child will be appended to the end -// except for the first index when the returned label does not end with '.'. -func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc { - if labelMap == nil { - labelMap = ChildGCLabels - } - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - if len(children) > 0 { - var ( - info = content.Info{ - Digest: desc.Digest, - Labels: map[string]string{}, - } - fields = []string{} - keys = map[string]uint{} - ) - for _, ch := range children { - labelKeys := labelMap(ch) - for _, key := range labelKeys { - idx := keys[key] - keys[key] = idx + 1 - if idx > 0 || key[len(key)-1] == '.' { - key = fmt.Sprintf("%s%d", key, idx) - } - - info.Labels[key] = ch.Digest.String() - fields = append(fields, "labels."+key) - } - } - - _, err := manager.Update(ctx, info, fields...) - if err != nil { - return nil, err - } - } - - return children, err - } -} - -// FilterPlatforms is a handler wrapper which limits the descriptors returned -// based on matching the specified platform matcher. 
-func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - var descs []ocispec.Descriptor - - if m == nil { - descs = children - } else { - for _, d := range children { - if d.Platform == nil || m.Match(*d.Platform) { - descs = append(descs, d) - } - } - } - - return descs, nil - } -} - -// LimitManifests is a handler wrapper which filters the manifest descriptors -// returned using the provided platform. -// The results will be ordered according to the comparison operator and -// use the ordering in the manifests for equal matches. -// A limit of 0 or less is considered no limit. -// A not found error is returned if no manifest is matched. -func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - switch desc.MediaType { - case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: - sort.SliceStable(children, func(i, j int) bool { - if children[i].Platform == nil { - return false - } - if children[j].Platform == nil { - return true - } - return m.Less(*children[i].Platform, *children[j].Platform) - }) - - if n > 0 { - if len(children) == 0 { - return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound) - } - if len(children) > n { - children = children[:n] - } - } - default: - // only limit manifests from an index - } - return children, nil - } -} diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go deleted file mode 100644 index b934e3496189..000000000000 --- a/vendor/github.com/containerd/containerd/images/image.go +++ /dev/null @@ -1,444 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "encoding/json" - "fmt" - "sort" - "time" - - "github.com/containerd/log" - "github.com/containerd/platforms" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" -) - -// Image provides the model for how containerd views container images. -type Image struct { - // Name of the image. - // - // To be pulled, it must be a reference compatible with resolvers. - // - // This field is required. - Name string - - // Labels provide runtime decoration for the image record. - // - // There is no default behavior for how these labels are propagated. They - // only decorate the static metadata object. - // - // This field is optional. - Labels map[string]string - - // Target describes the root content for this image. Typically, this is - // a manifest, index or manifest list. 
- Target ocispec.Descriptor - - CreatedAt, UpdatedAt time.Time -} - -// DeleteOptions provide options on image delete -type DeleteOptions struct { - Synchronous bool -} - -// DeleteOpt allows configuring a delete operation -type DeleteOpt func(context.Context, *DeleteOptions) error - -// SynchronousDelete is used to indicate that an image deletion and removal of -// the image resources should occur synchronously before returning a result. -func SynchronousDelete() DeleteOpt { - return func(ctx context.Context, o *DeleteOptions) error { - o.Synchronous = true - return nil - } -} - -// Store and interact with images -type Store interface { - Get(ctx context.Context, name string) (Image, error) - List(ctx context.Context, filters ...string) ([]Image, error) - Create(ctx context.Context, image Image) (Image, error) - - // Update will replace the data in the store with the provided image. If - // one or more fieldpaths are provided, only those fields will be updated. - Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error) - - Delete(ctx context.Context, name string, opts ...DeleteOpt) error -} - -// TODO(stevvooe): Many of these functions make strong platform assumptions, -// which are untrue in a lot of cases. More refactoring must be done here to -// make this work in all cases. - -// Config resolves the image configuration descriptor. -// -// The caller can then use the descriptor to resolve and process the -// configuration of the image. -func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) { - return Config(ctx, provider, image.Target, platform) -} - -// RootFS returns the unpacked diffids that make up and images rootfs. -// -// These are used to verify that a set of layers unpacked to the expected -// values. -func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) { - desc, err := image.Config(ctx, provider, platform) - if err != nil { - return nil, err - } - return RootFS(ctx, provider, desc) -} - -// Size returns the total size of an image's packed resources. -func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) { - var size int64 - return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if desc.Size < 0 { - return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) - } - size += desc.Size - return nil, nil - }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) -} - -type platformManifest struct { - p *ocispec.Platform - m *ocispec.Manifest -} - -// Manifest resolves a manifest from the image for the given platform. -// -// When a manifest descriptor inside of a manifest index does not have -// a platform defined, the platform from the image config is considered. -// -// If the descriptor points to a non-index manifest, then the manifest is -// unmarshalled and returned without considering the platform inside of the -// config. -// -// TODO(stevvooe): This violates the current platform agnostic approach to this -// package by returning a specific manifest type. We'll need to refactor this -// to return a manifest descriptor or decide that we want to bring the API in -// this direction because this abstraction is not needed. 
-func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { - var ( - limit = 1 - m []platformManifest - wasIndex bool - ) - - if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) - } - - var manifest ocispec.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - if desc.Digest != image.Digest && platform != nil { - if desc.Platform != nil && !platform.Match(*desc.Platform) { - return nil, nil - } - - if desc.Platform == nil { - p, err := content.ReadBlob(ctx, provider, manifest.Config) - if err != nil { - return nil, err - } - - var image ocispec.Image - if err := json.Unmarshal(p, &image); err != nil { - return nil, err - } - - if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) { - return nil, nil - } - - } - } - - m = append(m, platformManifest{ - p: desc.Platform, - m: &manifest, - }) - - return nil, nil - case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err) - } - - var idx ocispec.Index - if err := json.Unmarshal(p, &idx); err != nil { - return nil, err - } - - if platform == nil { - return idx.Manifests, nil - } - - var descs []ocispec.Descriptor - for _, d := range idx.Manifests { - if d.Platform == nil || platform.Match(*d.Platform) { - descs = append(descs, d) - } - } - - sort.SliceStable(descs, func(i, j int) bool { - if descs[i].Platform == nil { - return false - } - if descs[j].Platform == nil { - return true - } - return platform.Less(*descs[i].Platform, *descs[j].Platform) - }) - - wasIndex = true - - if len(descs) > limit { - return descs[:limit], nil - } - return descs, nil - } - return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound) - }), image); err != nil { - return ocispec.Manifest{}, err - } - - if len(m) == 0 { - err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound) - if wasIndex { - err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound) - } - return ocispec.Manifest{}, err - } - return *m[0].m, nil -} - -// Config resolves the image configuration descriptor using a content provided -// to resolve child resources on the image. -// -// The caller can then use the descriptor to resolve and process the -// configuration of the image. -func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) { - manifest, err := Manifest(ctx, provider, image, platform) - if err != nil { - return ocispec.Descriptor{}, err - } - return manifest.Config, err -} - -// Platforms returns one or more platforms supported by the image. 
-func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) { - var platformSpecs []ocispec.Platform - return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if desc.Platform != nil { - if desc.Platform.OS == "unknown" || desc.Platform.Architecture == "unknown" { - return nil, ErrSkipDesc - } - platformSpecs = append(platformSpecs, *desc.Platform) - return nil, ErrSkipDesc - } - - switch desc.MediaType { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - var image ocispec.Image - if err := json.Unmarshal(p, &image); err != nil { - return nil, err - } - - platformSpecs = append(platformSpecs, - platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) - } - return nil, nil - }), ChildrenHandler(provider)), image) -} - -// Check returns nil if the all components of an image are available in the -// provider for the specified platform. -// -// If available is true, the caller can assume that required represents the -// complete set of content required for the image. -// -// missing will have the components that are part of required but not available -// in the provider. -// -// If there is a problem resolving content, an error will be returned. -func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) { - mfst, err := Manifest(ctx, provider, image, platform) - if err != nil { - if errdefs.IsNotFound(err) { - return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil - } - - return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err) - } - - // TODO(stevvooe): It is possible that referenced components could have - // children, but this is rare. For now, we ignore this and only verify - // that manifest components are present. - required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...) - - for _, desc := range required { - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - if errdefs.IsNotFound(err) { - missing = append(missing, desc) - continue - } else { - return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err) - } - } - ra.Close() - present = append(present, desc) - - } - - return true, required, present, missing, nil -} - -// Children returns the immediate children of content described by the descriptor. -func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - var descs []ocispec.Descriptor - switch desc.MediaType { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err) - } - - // TODO(stevvooe): We just assume oci manifest, for now. There may be - // subtle differences from the docker version. - var manifest ocispec.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - descs = append(descs, manifest.Config) - descs = append(descs, manifest.Layers...) 
- case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err) - } - - var index ocispec.Index - if err := json.Unmarshal(p, &index); err != nil { - return nil, err - } - - descs = append(descs, index.Manifests...) - default: - if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) { - // childless data types. - return nil, nil - } - log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType) - } - - return descs, nil -} - -// unknownDocument represents a manifest, manifest list, or index that has not -// yet been validated. -type unknownDocument struct { - MediaType string `json:"mediaType,omitempty"` - Config json.RawMessage `json:"config,omitempty"` - Layers json.RawMessage `json:"layers,omitempty"` - Manifests json.RawMessage `json:"manifests,omitempty"` - FSLayers json.RawMessage `json:"fsLayers,omitempty"` // schema 1 -} - -// validateMediaType returns an error if the byte slice is invalid JSON or if -// the media type identifies the blob as one format but it contains elements of -// another format. -func validateMediaType(b []byte, mt string) error { - var doc unknownDocument - if err := json.Unmarshal(b, &doc); err != nil { - return err - } - if len(doc.FSLayers) != 0 { - return fmt.Errorf("media-type: schema 1 not supported") - } - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - if len(doc.Manifests) != 0 || - doc.MediaType == MediaTypeDockerSchema2ManifestList || - doc.MediaType == ocispec.MediaTypeImageIndex { - return fmt.Errorf("media-type: expected manifest but found index (%s)", mt) - } - case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - if len(doc.Config) != 0 || len(doc.Layers) != 0 || - doc.MediaType == MediaTypeDockerSchema2Manifest || - doc.MediaType == ocispec.MediaTypeImageManifest { - return fmt.Errorf("media-type: expected index but found manifest (%s)", mt) - } - } - return nil -} - -// RootFS returns the unpacked diffids that make up and images rootfs. -// -// These are used to verify that a set of layers unpacked to the expected -// values. -func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) { - p, err := content.ReadBlob(ctx, provider, configDesc) - if err != nil { - return nil, err - } - - var config ocispec.Image - if err := json.Unmarshal(p, &config); err != nil { - return nil, err - } - return config.RootFS.DiffIDs, nil -} diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go deleted file mode 100644 index 843adcadc7ec..000000000000 --- a/vendor/github.com/containerd/containerd/images/importexport.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "io" - - "github.com/containerd/containerd/content" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Importer is the interface for image importer. -type Importer interface { - // Import imports an image from a tar stream. - Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) -} - -// Exporter is the interface for image exporter. -type Exporter interface { - // Export exports an image to a tar stream. - Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error -} diff --git a/vendor/github.com/containerd/containerd/images/labels.go b/vendor/github.com/containerd/containerd/images/labels.go deleted file mode 100644 index 06dfed572d43..000000000000 --- a/vendor/github.com/containerd/containerd/images/labels.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -const ( - ConvertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1" -) diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go deleted file mode 100644 index d3b28d42dc61..000000000000 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "fmt" - "sort" - "strings" - - "github.com/containerd/containerd/errdefs" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// mediatype definitions for image components handled in containerd. -// -// oci components are generally referenced directly, although we may centralize -// here for clarity. 
-const ( - MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" - MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" - MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" - MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" - MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" - MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" - MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" - - // Checkpoint/Restore Media Types - - MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" - MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" - MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" - MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" - MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" - MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" - MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" - MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" - - // MediaTypeDockerSchema1Manifest is the legacy Docker schema1 manifest - MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" - - // Encrypted media types - - MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" - MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" -) - -// DiffCompression returns the compression as defined by the layer diff media -// type. For Docker media types without compression, "unknown" is returned to -// indicate that the media type may be compressed. If the media type is not -// recognized as a layer diff, then it returns errdefs.ErrNotImplemented -func DiffCompression(ctx context.Context, mediaType string) (string, error) { - base, ext := parseMediaTypes(mediaType) - switch base { - case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: - if len(ext) > 0 { - // Type is wrapped - return "", nil - } - // These media types may have been compressed but failed to - // use the correct media type. The decompression function - // should detect and handle this case. 
- return "unknown", nil - case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: - if len(ext) > 0 { - // Type is wrapped - return "", nil - } - return "gzip", nil - case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // Non-distributable layers are deprecated - if len(ext) > 0 { - switch ext[len(ext)-1] { - case "gzip": - return "gzip", nil - case "zstd": - return "zstd", nil - } - } - return "", nil - default: - return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented) - } -} - -// parseMediaTypes splits the media type into the base type and -// an array of sorted extensions -func parseMediaTypes(mt string) (mediaType string, suffixes []string) { - if mt == "" { - return "", []string{} - } - mediaType, ext, ok := strings.Cut(mt, "+") - if !ok { - return mediaType, []string{} - } - - // Splitting the extensions following the mediatype "(+)gzip+encrypted". - // We expect this to be a limited list, so add an arbitrary limit (50). - // - // Note that DiffCompression is only using the last element, so perhaps we - // should split on the last "+" only. - suffixes = strings.SplitN(ext, "+", 50) - sort.Strings(suffixes) - return mediaType, suffixes -} - -// IsNonDistributable returns true if the media type is non-distributable. -func IsNonDistributable(mt string) bool { - return strings.HasPrefix(mt, "application/vnd.oci.image.layer.nondistributable.") || - strings.HasPrefix(mt, "application/vnd.docker.image.rootfs.foreign.") -} - -// IsLayerType returns true if the media type is a layer -func IsLayerType(mt string) bool { - if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { - return true - } - - // Parse Docker media types, strip off any + suffixes first - switch base, _ := parseMediaTypes(mt); base { - case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, - MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: - return true - } - return false -} - -// IsDockerType returns true if the media type has "application/vnd.docker." prefix -func IsDockerType(mt string) bool { - return strings.HasPrefix(mt, "application/vnd.docker.") -} - -// IsManifestType returns true if the media type is an OCI-compatible manifest. -// No support for schema1 manifest. -func IsManifestType(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - return true - default: - return false - } -} - -// IsIndexType returns true if the media type is an OCI-compatible index. -func IsIndexType(mt string) bool { - switch mt { - case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: - return true - default: - return false - } -} - -// IsConfigType returns true if the media type is an OCI-compatible image config. -// No support for containerd checkpoint configs. 
-func IsConfigType(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - return true - default: - return false - } -} - -// IsKnownConfig returns true if the media type is a known config type, -// including containerd checkpoint configs -func IsKnownConfig(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, - MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: - return true - } - return false -} - -// ChildGCLabels returns the label for a given descriptor to reference it -func ChildGCLabels(desc ocispec.Descriptor) []string { - mt := desc.MediaType - if IsKnownConfig(mt) { - return []string{"containerd.io/gc.ref.content.config"} - } - - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - return []string{"containerd.io/gc.ref.content.m."} - } - - if IsLayerType(mt) { - return []string{"containerd.io/gc.ref.content.l."} - } - - return []string{"containerd.io/gc.ref.content."} -} - -// ChildGCLabelsFilterLayers returns the labels for a given descriptor to -// reference it, skipping layer media types -func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string { - if IsLayerType(desc.MediaType) { - return nil - } - return ChildGCLabels(desc) -} diff --git a/vendor/github.com/containerd/containerd/labels/labels.go b/vendor/github.com/containerd/containerd/labels/labels.go deleted file mode 100644 index 0f9bab5c5d46..000000000000 --- a/vendor/github.com/containerd/containerd/labels/labels.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package labels - -// LabelUncompressed is added to compressed layer contents. -// The value is digest of the uncompressed content. -const LabelUncompressed = "containerd.io/uncompressed" - -// LabelSharedNamespace is added to a namespace to allow that namespaces -// contents to be shared. -const LabelSharedNamespace = "containerd.io/namespace.shareable" - -// LabelDistributionSource is added to content to indicate its origin. -// e.g., "containerd.io/distribution.source.docker.io=library/redis" -const LabelDistributionSource = "containerd.io/distribution.source" diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go deleted file mode 100644 index f83b5dde294d..000000000000 --- a/vendor/github.com/containerd/containerd/labels/validate.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package labels - -import ( - "fmt" - - "github.com/containerd/containerd/errdefs" -) - -const ( - maxSize = 4096 - // maximum length of key portion of error message if len of key + len of value > maxSize - keyMaxLen = 64 -) - -// Validate a label's key and value are under 4096 bytes -func Validate(k, v string) error { - total := len(k) + len(v) - if total > maxSize { - if len(k) > keyMaxLen { - k = k[:keyMaxLen] - } - return fmt.Errorf("label key and value length (%d bytes) greater than maximum size (%d bytes), key: %s: %w", total, maxSize, k, errdefs.ErrInvalidArgument) - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/leases/context.go b/vendor/github.com/containerd/containerd/leases/context.go deleted file mode 100644 index 599c549d31bb..000000000000 --- a/vendor/github.com/containerd/containerd/leases/context.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package leases - -import "context" - -type leaseKey struct{} - -// WithLease sets a given lease on the context -func WithLease(ctx context.Context, lid string) context.Context { - ctx = context.WithValue(ctx, leaseKey{}, lid) - - // also store on the grpc headers so it gets picked up by any clients that - // are using this. - return withGRPCLeaseHeader(ctx, lid) -} - -// FromContext returns the lease from the context. -func FromContext(ctx context.Context) (string, bool) { - lid, ok := ctx.Value(leaseKey{}).(string) - if !ok { - return fromGRPCHeader(ctx) - } - - return lid, ok -} diff --git a/vendor/github.com/containerd/containerd/leases/grpc.go b/vendor/github.com/containerd/containerd/leases/grpc.go deleted file mode 100644 index 22f287a8bfc6..000000000000 --- a/vendor/github.com/containerd/containerd/leases/grpc.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package leases - -import ( - "context" - - "google.golang.org/grpc/metadata" -) - -const ( - // GRPCHeader defines the header name for specifying a containerd lease. - GRPCHeader = "containerd-lease" -) - -func withGRPCLeaseHeader(ctx context.Context, lid string) context.Context { - // also store on the grpc headers so it gets picked up by any clients - // that are using this. - txheader := metadata.Pairs(GRPCHeader, lid) - md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context. 
- if !ok { - md = txheader - } else { - // order ensures the latest is first in this list. - md = metadata.Join(txheader, md) - } - - return metadata.NewOutgoingContext(ctx, md) -} - -func fromGRPCHeader(ctx context.Context) (string, bool) { - // try to extract for use in grpc servers. - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "", false - } - - values := md[GRPCHeader] - if len(values) == 0 { - return "", false - } - - return values[0], true -} diff --git a/vendor/github.com/containerd/containerd/leases/id.go b/vendor/github.com/containerd/containerd/leases/id.go deleted file mode 100644 index 8f5dc93f348e..000000000000 --- a/vendor/github.com/containerd/containerd/leases/id.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package leases - -import ( - "crypto/rand" - "encoding/base64" - "fmt" - "time" -) - -// WithRandomID sets the lease ID to a random unique value -func WithRandomID() Opt { - return func(l *Lease) error { - t := time.Now() - var b [3]byte - rand.Read(b[:]) - l.ID = fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) - return nil - } -} - -// WithID sets the ID for the lease -func WithID(id string) Opt { - return func(l *Lease) error { - l.ID = id - return nil - } -} diff --git a/vendor/github.com/containerd/containerd/leases/lease.go b/vendor/github.com/containerd/containerd/leases/lease.go deleted file mode 100644 index fc0ca3491c55..000000000000 --- a/vendor/github.com/containerd/containerd/leases/lease.go +++ /dev/null @@ -1,91 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package leases - -import ( - "context" - "time" -) - -// Opt is used to set options on a lease -type Opt func(*Lease) error - -// DeleteOpt allows configuring a delete operation -type DeleteOpt func(context.Context, *DeleteOptions) error - -// Manager is used to create, list, and remove leases -type Manager interface { - Create(context.Context, ...Opt) (Lease, error) - Delete(context.Context, Lease, ...DeleteOpt) error - List(context.Context, ...string) ([]Lease, error) - AddResource(context.Context, Lease, Resource) error - DeleteResource(context.Context, Lease, Resource) error - ListResources(context.Context, Lease) ([]Resource, error) -} - -// Lease retains resources to prevent cleanup before -// the resources can be fully referenced. 
-type Lease struct { - ID string - CreatedAt time.Time - Labels map[string]string -} - -// Resource represents low level resource of image, like content, ingest and -// snapshotter. -type Resource struct { - ID string - Type string -} - -// DeleteOptions provide options on image delete -type DeleteOptions struct { - Synchronous bool -} - -// SynchronousDelete is used to indicate that a lease deletion and removal of -// any unreferenced resources should occur synchronously before returning the -// result. -func SynchronousDelete(ctx context.Context, o *DeleteOptions) error { - o.Synchronous = true - return nil -} - -// WithLabels merges labels on a lease -func WithLabels(labels map[string]string) Opt { - return func(l *Lease) error { - if l.Labels == nil { - l.Labels = map[string]string{} - } - for k, v := range labels { - l.Labels[k] = v - } - return nil - } -} - -// WithExpiration sets an expiration on the lease -func WithExpiration(d time.Duration) Opt { - return func(l *Lease) error { - if l.Labels == nil { - l.Labels = map[string]string{} - } - l.Labels["containerd.io/gc.expire"] = time.Now().Add(d).Format(time.RFC3339) - - return nil - } -} diff --git a/vendor/github.com/containerd/containerd/pkg/epoch/context.go b/vendor/github.com/containerd/containerd/pkg/epoch/context.go deleted file mode 100644 index fd16f951969f..000000000000 --- a/vendor/github.com/containerd/containerd/pkg/epoch/context.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package epoch - -import ( - "context" - "time" -) - -type ( - epochKey struct{} -) - -// WithSourceDateEpoch associates the context with the epoch. -func WithSourceDateEpoch(ctx context.Context, tm *time.Time) context.Context { - return context.WithValue(ctx, epochKey{}, tm) -} - -// FromContext returns the epoch associated with the context. -// FromContext does not fall back to read the SOURCE_DATE_EPOCH env var. -func FromContext(ctx context.Context) *time.Time { - v := ctx.Value(epochKey{}) - if v == nil { - return nil - } - return v.(*time.Time) -} diff --git a/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go b/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go deleted file mode 100644 index 124e8edb507a..000000000000 --- a/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package epoch provides SOURCE_DATE_EPOCH utilities. 
-package epoch - -import ( - "fmt" - "os" - "strconv" - "time" - - "github.com/sirupsen/logrus" -) - -// SourceDateEpochEnv is the SOURCE_DATE_EPOCH env var. -// See https://reproducible-builds.org/docs/source-date-epoch/ -const SourceDateEpochEnv = "SOURCE_DATE_EPOCH" - -// SourceDateEpoch returns the SOURCE_DATE_EPOCH env var as *time.Time. -// If the env var is not set, SourceDateEpoch returns nil without an error. -func SourceDateEpoch() (*time.Time, error) { - v, ok := os.LookupEnv(SourceDateEpochEnv) - if !ok || v == "" { - return nil, nil // not an error - } - i64, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid %s value %q: %w", SourceDateEpochEnv, v, err) - } - unix := time.Unix(i64, 0).UTC() - return &unix, nil -} - -// SourceDateEpochOrNow returns the SOURCE_DATE_EPOCH time if available, -// otherwise returns the current time. -func SourceDateEpochOrNow() time.Time { - epoch, err := SourceDateEpoch() - if err != nil { - logrus.WithError(err).Warnf("Invalid %s", SourceDateEpochEnv) - } - if epoch != nil { - return *epoch - } - return time.Now().UTC() -} - -// SetSourceDateEpoch sets the SOURCE_DATE_EPOCH env var. -func SetSourceDateEpoch(tm time.Time) { - os.Setenv(SourceDateEpochEnv, fmt.Sprintf("%d", tm.Unix())) -} - -// UnsetSourceDateEpoch unsets the SOURCE_DATE_EPOCH env var. -func UnsetSourceDateEpoch() { - os.Unsetenv(SourceDateEpochEnv) -} diff --git a/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go b/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go deleted file mode 100644 index f4b657d7dd87..000000000000 --- a/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package randutil provides utilities for [cyrpto/rand]. -package randutil - -import ( - "crypto/rand" - "math" - "math/big" -) - -// Int63n is similar to [math/rand.Int63n] but uses [crypto/rand.Reader] under the hood. -func Int63n(n int64) int64 { - b, err := rand.Int(rand.Reader, big.NewInt(n)) - if err != nil { - panic(err) - } - return b.Int64() -} - -// Int63 is similar to [math/rand.Int63] but uses [crypto/rand.Reader] under the hood. -func Int63() int64 { - return Int63n(math.MaxInt64) -} - -// Intn is similar to [math/rand.Intn] but uses [crypto/rand.Reader] under the hood. -func Intn(n int) int { - return int(Int63n(int64(n))) -} - -// Int is similar to [math/rand.Int] but uses [crypto/rand.Reader] under the hood. 
-func Int() int { - return int(Int63()) -} diff --git a/vendor/github.com/containerd/containerd/pkg/snapshotters/annotations.go b/vendor/github.com/containerd/containerd/v2/pkg/snapshotters/annotations.go similarity index 95% rename from vendor/github.com/containerd/containerd/pkg/snapshotters/annotations.go rename to vendor/github.com/containerd/containerd/v2/pkg/snapshotters/annotations.go index 63476376c1b7..2e44b05fb31f 100644 --- a/vendor/github.com/containerd/containerd/pkg/snapshotters/annotations.go +++ b/vendor/github.com/containerd/containerd/v2/pkg/snapshotters/annotations.go @@ -19,8 +19,8 @@ package snapshotters import ( "context" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/pkg/labels" "github.com/containerd/log" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -55,8 +55,7 @@ func AppendInfoHandlerWrapper(ref string) func(f images.Handler) images.Handler if err != nil { return nil, err } - switch desc.MediaType { - case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest: + if images.IsManifestType(desc.MediaType) { for i := range children { c := &children[i] if images.IsLayerType(c.MediaType) { diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go index c892c685117b..4d0dda0d3999 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go @@ -22,13 +22,13 @@ import ( "sync" "syscall" - "github.com/containerd/containerd/archive" - "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/content/local" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/images/converter" - "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/images/converter" + "github.com/containerd/containerd/v2/pkg/archive" + "github.com/containerd/containerd/v2/pkg/archive/compression" + "github.com/containerd/containerd/v2/pkg/labels" + "github.com/containerd/containerd/v2/plugins/content/local" "github.com/containerd/errdefs" "github.com/containerd/fifo" "github.com/klauspost/compress/zstd" diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go index f3bf6bea0c67..c0a570d3c02a 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go @@ -13,8 +13,8 @@ import ( "context" "io" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images/converter" + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images/converter" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go index 43c8e0228748..7a3af70927d0 100644 --- 
a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go @@ -20,7 +20,7 @@ import ( "strconv" "strings" - "github.com/containerd/containerd/content" + "github.com/containerd/containerd/v2/core/content" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go index 78f860029a08..7a789b1a3a90 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go @@ -305,7 +305,33 @@ func Unpack(option UnpackOption) error { } if option.BackendConfigPath != "" { - args = append(args, "--backend-config", option.BackendConfigPath) + configBytes, err := os.ReadFile(option.BackendConfigPath) + if err != nil { + return errors.Wrapf(err, "fail to read backend config file %s", option.BackendConfigPath) + } + + var config map[string]interface{} + if err := json.Unmarshal(configBytes, &config); err != nil { + return errors.Wrapf(err, "fail to unmarshal backend config file %s", option.BackendConfigPath) + } + + backendConfigType, ok := config["backend"].(map[string]interface{})["type"] + if !ok { + return errors.New("backend config file should contain a valid backend type") + } + + backendConfig, ok := config["backend"].(map[string]interface{})[backendConfigType.(string)] + if !ok { + return errors.New("failed to get backend config with type " + backendConfigType.(string)) + } + + backendConfigBytes, err := json.Marshal(backendConfig) + if err != nil { + return errors.Wrapf(err, "fail to marshal backend config %v", backendConfig) + } + + args = append(args, "--backend-type", backendConfigType.(string)) + args = append(args, "--backend-config", string(backendConfigBytes)) } else if option.BlobPath != "" { args = append(args, "--blob", option.BlobPath) } diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go index 6bd54a6afc44..9827d0a37755 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/containerd/containerd/content" + "github.com/containerd/containerd/v2/core/content" "github.com/containerd/nydus-snapshotter/pkg/converter/tool" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go index 7c09f76d0b20..74dabec318fe 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go @@ -16,7 +16,7 @@ import ( "io" "path/filepath" - "github.com/containerd/containerd/content" + "github.com/containerd/containerd/v2/core/content" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go index f5392771d4c9..f6368f536079 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go +++ 
b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go @@ -7,7 +7,7 @@ package label import ( - snpkg "github.com/containerd/containerd/pkg/snapshotters" + snpkg "github.com/containerd/containerd/v2/pkg/snapshotters" ) // For package compatibility, we still keep the old exported name here. diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 5091fb0736c8..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index c56f37c0c943..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go - -sudo: false - -go: - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - 1.14.x - - 1.15.x - - tip - -allow_failures: - - go: tip - -script: make build - -matrix: - include: - - language: go - go: 1.15.x - script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE deleted file mode 100644 index b03310a91fde..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2015 James Saryerwinnie - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile deleted file mode 100644 index fb38ec2760e1..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ /dev/null @@ -1,51 +0,0 @@ - -CMD = jpgo - -SRC_PKGS=./ ./cmd/... ./fuzz/... - -help: - @echo "Please use \`make ' where is one of" - @echo " test to run all the tests" - @echo " build to build the library and jp executable" - @echo " generate to run codegen" - - -generate: - go generate ${SRC_PKGS} - -build: - rm -f $(CMD) - go build ${SRC_PKGS} - rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... - mv cmd/$(CMD)/$(CMD) . - -test: test-internal-testify - echo "making tests ${SRC_PKGS}" - go test -v ${SRC_PKGS} - -check: - go vet ${SRC_PKGS} - @echo "golint ${SRC_PKGS}" - @lint=`golint ${SRC_PKGS}`; \ - lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ - echo "$$lint"; \ - if [ "$$lint" != "" ]; then exit 1; fi - -htmlc: - go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov - -buildfuzz: - go-fuzz-build github.com/jmespath/go-jmespath/fuzz - -fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata - -bench: - go test -bench . -cpuprofile cpu.out - -pprof-cpu: - go tool pprof ./go-jmespath.test ./cpu.out - -test-internal-testify: - cd internal/testify && go test ./... 
- diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md deleted file mode 100644 index 110ad799976d..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# go-jmespath - A JMESPath implementation in Go - -[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) - - - -go-jmespath is a GO implementation of JMESPath, -which is a query language for JSON. It will take a JSON -document and transform it into another JSON document -through a JMESPath expression. - -Using go-jmespath is really easy. There's a single function -you use, `jmespath.search`: - - -```go -> import "github.com/jmespath/go-jmespath" -> -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo.bar.baz[2]", data) -result = 2 -``` - -In the example we gave the ``search`` function input data of -`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath -expression `foo.bar.baz[2]`, and the `search` function evaluated -the expression against the input data to produce the result ``2``. - -The JMESPath language can do a lot more than select an element -from a list. Here are a few more examples: - -```go -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search("foo.bar", data) -result = { "baz": [ 0, 1, 2, 3, 4 ] } - - -> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, - {"first": "c", "last": "d"}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search({"foo[*].first", data) -result [ 'a', 'c' ] - - -> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, - {"age": 30}, {"age": 35}, - {"age": 40}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search("foo[?age > `30`]") -result = [ { age: 35 }, { age: 40 } ] -``` - -You can also pre-compile your query. This is usefull if -you are going to run multiple searches with it: - -```go - > var jsondata = []byte(`{"foo": "bar"}`) - > var data interface{} - > err := json.Unmarshal(jsondata, &data) - > precompiled, err := Compile("foo") - > if err != nil{ - > // ... handle the error - > } - > result, err := precompiled.Search(data) - result = "bar" -``` - -## More Resources - -The example above only show a small amount of what -a JMESPath expression can do. If you want to take a -tour of the language, the *best* place to go is the -[JMESPath Tutorial](http://jmespath.org/tutorial.html). - -One of the best things about JMESPath is that it is -implemented in many different programming languages including -python, ruby, php, lua, etc. To see a complete list of libraries, -check out the [JMESPath libraries page](http://jmespath.org/libraries.html). - -And finally, the full JMESPath specification can be found -on the [JMESPath site](http://jmespath.org/specification.html). diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go deleted file mode 100644 index 010efe9bfba3..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ /dev/null @@ -1,49 +0,0 @@ -package jmespath - -import "strconv" - -// JMESPath is the representation of a compiled JMES path query. 
A JMESPath is -// safe for concurrent use by multiple goroutines. -type JMESPath struct { - ast ASTNode - intr *treeInterpreter -} - -// Compile parses a JMESPath expression and returns, if successful, a JMESPath -// object that can be used to match against data. -func Compile(expression string) (*JMESPath, error) { - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - jmespath := &JMESPath{ast: ast, intr: newInterpreter()} - return jmespath, nil -} - -// MustCompile is like Compile but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled -// JMESPaths. -func MustCompile(expression string) *JMESPath { - jmespath, err := Compile(expression) - if err != nil { - panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) - } - return jmespath -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func (jp *JMESPath) Search(data interface{}) (interface{}, error) { - return jp.intr.Execute(jp.ast, data) -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d239c969..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89b4bcc..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" - jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items 
[]interface{} - hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "amp", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - "max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - 
"min_by": { - name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - {types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": { - name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) 
- } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. - args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current < 
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number of string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string. 
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c74604c2c8..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ /dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". 
-func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) - } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. 
- rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, 
nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) - elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c8f529..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. -type Lexer struct { - expression string // The expression provided by the user. 
- currentPos int // The current position in the string. - lastWidth int // The width of the current rune. This - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. -func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token. 
- t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' { - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. 
- return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tPipe, "|" -> tOr. -func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There's three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare rbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number. 
- start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 4abc303ab4a9..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. -type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expression: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } - 
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - //var right ASTNode - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, nil - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType: 
ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, nil - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, nil - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil 
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cbdf338..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d7d460..000000000000 --- a/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. -// JMESPath defines false values to be any of: -// - An empty string array, or hash. -// - The boolean value false. 
-// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. 
-// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4c1e3544c4ee..c49ab760fbb2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -123,8 +123,8 @@ github.com/anchore/go-struct-converter # github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 ## explicit github.com/armon/circbuf -# github.com/aws/aws-sdk-go-v2 v1.24.1 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2 v1.30.3 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/arn github.com/aws/aws-sdk-go-v2/aws/defaults @@ -144,6 +144,7 @@ github.com/aws/aws-sdk-go-v2/internal/awsutil github.com/aws/aws-sdk-go-v2/internal/context github.com/aws/aws-sdk-go-v2/internal/endpoints github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn +github.com/aws/aws-sdk-go-v2/internal/middleware github.com/aws/aws-sdk-go-v2/internal/rand github.com/aws/aws-sdk-go-v2/internal/sdk github.com/aws/aws-sdk-go-v2/internal/sdkio @@ -151,15 +152,15 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.26.6 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/config v1.27.27 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.16.16 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/credentials v1.17.27 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds github.com/aws/aws-sdk-go-v2/credentials/endpointcreds @@ -167,65 +168,65 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.8 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/feature/s3/manager -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 +## explicit; go 1.20 
github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/v4a github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4 -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/checksum -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config -# github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/s3 github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints github.com/aws/aws-sdk-go-v2/service/s3/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 -## explicit; go 1.19 +# github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 +## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.19.0 -## explicit; go 1.19 +# github.com/aws/smithy-go v1.20.3 +## explicit; go 1.20 github.com/aws/smithy-go github.com/aws/smithy-go/auth github.com/aws/smithy-go/auth/bearer @@ -266,22 +267,6 @@ github.com/containerd/cgroups/v3/cgroup1/stats # github.com/containerd/console v1.0.4 ## explicit; go 1.13 github.com/containerd/console -# 
github.com/containerd/containerd v1.7.24 -## explicit; go 1.21 -github.com/containerd/containerd/archive -github.com/containerd/containerd/archive/compression -github.com/containerd/containerd/archive/tarheader -github.com/containerd/containerd/content -github.com/containerd/containerd/content/local -github.com/containerd/containerd/errdefs -github.com/containerd/containerd/filters -github.com/containerd/containerd/images -github.com/containerd/containerd/images/converter -github.com/containerd/containerd/labels -github.com/containerd/containerd/leases -github.com/containerd/containerd/pkg/epoch -github.com/containerd/containerd/pkg/randutil -github.com/containerd/containerd/pkg/snapshotters # github.com/containerd/containerd/api v1.8.0 ## explicit; go 1.21 github.com/containerd/containerd/api/events @@ -371,6 +356,7 @@ github.com/containerd/containerd/v2/pkg/protobuf/types github.com/containerd/containerd/v2/pkg/reference github.com/containerd/containerd/v2/pkg/rootfs github.com/containerd/containerd/v2/pkg/seccomp +github.com/containerd/containerd/v2/pkg/snapshotters github.com/containerd/containerd/v2/pkg/sys github.com/containerd/containerd/v2/pkg/tracing github.com/containerd/containerd/v2/plugins @@ -417,8 +403,8 @@ github.com/containerd/go-runc # github.com/containerd/log v0.1.0 ## explicit; go 1.20 github.com/containerd/log -# github.com/containerd/nydus-snapshotter v0.14.0 -## explicit; go 1.21 +# github.com/containerd/nydus-snapshotter v0.15.0 +## explicit; go 1.22.0 github.com/containerd/nydus-snapshotter/pkg/converter github.com/containerd/nydus-snapshotter/pkg/converter/tool github.com/containerd/nydus-snapshotter/pkg/label @@ -635,9 +621,6 @@ github.com/in-toto/in-toto-golang/in_toto github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1 github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2 -# github.com/jmespath/go-jmespath v0.4.0 -## explicit; go 1.14 -github.com/jmespath/go-jmespath # github.com/klauspost/compress v1.17.11 ## explicit; go 1.21 github.com/klauspost/compress