upgrade s3 client from aws sdk v1 to v2
Signed-off-by: Henry Avetisyan <hga@yahooinc.com>
havetisyan committed Aug 15, 2024
1 parent c0b66af commit 9454009
Showing 14 changed files with 371 additions and 277 deletions.
37 changes: 10 additions & 27 deletions syncers/zms_aws_domain_syncer/pom.xml
@@ -14,24 +14,12 @@
<packaging>jar</packaging>

<properties>
<code.coverage.min>0.9410</code.coverage.min>
<code.coverage.min>0.9446</code.coverage.min>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<checkstyle.skip>true</checkstyle.skip>
</properties>

<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-bom</artifactId>
<version>${aws.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>

<dependencies>
<dependency>
<groupId>com.yahoo.athenz</groupId>
@@ -49,18 +37,19 @@
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-s3</artifactId>
<groupId>software.amazon.awssdk</groupId>
<artifactId>s3</artifactId>
<version>${aws2.version}</version>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-sts</artifactId>
<groupId>software.amazon.awssdk</groupId>
<artifactId>sts</artifactId>
<version>${aws2.version}</version>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<version>${testng.version}</version>
<scope>test</scope>
<groupId>software.amazon.awssdk</groupId>
<artifactId>apache-client</artifactId>
<version>${aws2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
@@ -93,12 +82,6 @@
<version>${guava.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<scope>test</scope>
</dependency>
</dependencies>

<build>
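For reference, a minimal sketch (not part of the commit) of what the new coordinates provide: the s3 artifact supplies S3Client, apache-client supplies the Apache-based HTTP implementation, and sts is only needed when role-based credentials are resolved at runtime. Region value below is a placeholder.

```java
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3Client;

public class SdkV2DependencyCheck {
    public static void main(String[] args) {
        // builds an SDK v2 S3 client backed by the Apache HTTP client,
        // exercising the artifacts added to the pom above
        try (S3Client s3 = S3Client.builder()
                .region(Region.US_WEST_2)                        // placeholder region
                .httpClient(ApacheHttpClient.builder().build())
                .build()) {
            System.out.println("created v2 client for: " + s3.serviceName());
        }
    }
}
```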
@@ -18,67 +18,65 @@

package com.yahoo.athenz.zms_aws_domain_syncer;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.codec.digest.DigestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;

import static com.amazonaws.RequestClientOptions.DEFAULT_STREAM_BUFFER_SIZE;
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

public class AwsSyncer {

private static final Logger LOGGER = LoggerFactory.getLogger(AwsSyncer.class);

private static final int MAX_RETRY_COUNT = 3;
private static final String OCTET_STREAM_CONTENT_TYPE = "application/octet-stream";

private final AmazonS3 s3client;
private final S3Client s3Client;

public AwsSyncer() throws Exception {
this.s3client = S3ClientFactory.getS3Client();
this.s3Client = S3ClientFactory.getS3Client();
}

public AwsSyncer(AmazonS3 s3client) {
this.s3client = s3client;
public AwsSyncer(S3Client s3Client) {
this.s3Client = s3Client;
}

public void uploadDomain(final String domainName, final String domJson) throws Exception {
public void uploadDomain(final String domainName, final String domJson) {

ObjectMetadata meta = new ObjectMetadata();
meta.setContentDisposition(domainName);
byte[] payload = domJson.getBytes();
meta.setContentLength(payload.length);
meta.setContentType(OCTET_STREAM_CONTENT_TYPE);
final String sseAlgorithm = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_SSE_ALGORITHM);
if (!Config.isEmpty(sseAlgorithm)) {
meta.setSSEAlgorithm(sseAlgorithm);
}
final String bucketName = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_BUCKET);

// now let's calculate our md5 digest

byte[] payload = domJson.getBytes();
byte[] md5Byte = DigestUtils.md5(payload);
String md5Meta = new String(Base64.encodeBase64(md5Byte));
meta.setContentMD5(md5Meta);

// Upload object with MD5 hash

PutObjectRequest.Builder putObjectRequestBuilder = PutObjectRequest.builder()
.bucket(bucketName)
.key(domainName)
.contentMD5(md5Meta);

if (!Config.isEmpty(sseAlgorithm)) {
putObjectRequestBuilder.serverSideEncryption(ServerSideEncryption.fromValue(sseAlgorithm));
}

PutObjectRequest putObjectRequest = putObjectRequestBuilder.build();

// in case we get a md5 mismatch exception from AWS, most likely
// there were some network errors, so we're going to retry our
// operations upto 3 times with some small delay between operations

for (int count = 0; true; count++) {

try (BufferedInputStream instr = new BufferedInputStream(
new ByteArrayInputStream(payload), DEFAULT_STREAM_BUFFER_SIZE)) {

// Amazon S3 never stores partial objects; if during this
// call an exception wasn't thrown, the entire object was stored.
try {

final String bucket = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_BUCKET);
s3client.putObject(bucket, domainName, instr, meta);
s3Client.putObject(putObjectRequest, RequestBody.fromBytes(payload));

} catch (Exception ex) {

@@ -91,7 +89,7 @@ public void uploadDomain(final String domainName, final String domJson) throws E
continue;
}

throw new Exception(ex);
throw ex;
}

// if we got here then no exception, and we successfully processed
@@ -103,14 +101,19 @@ public void uploadDomain(final String domainName, final String domJson) throws E
}
}

public void deleteDomain(final String domainName) throws Exception {
public void deleteDomain(final String domainName) {

try {
final String bucket = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_BUCKET);
s3client.deleteObject(bucket, domainName);
final String bucketName = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_BUCKET);
DeleteObjectRequest deleteObjectRequest = DeleteObjectRequest.builder()
.bucket(bucketName)
.key(domainName)
.build();

s3Client.deleteObject(deleteObjectRequest);
} catch (Exception ex) {
LOGGER.error("unable to delete domain: {}", domainName, ex);
throw new Exception(ex);
throw ex;
}
}
}
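For context, a hedged standalone sketch of the v1-to-v2 upload mapping applied above: in v2 the object metadata moves onto PutObjectRequest and the payload is passed as a RequestBody. Bucket, key, and payload below are placeholders; S3Client.create() uses the default region and credential chain.

```java
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.ServerSideEncryption;

public class PutObjectV2Sketch {
    public static void main(String[] args) {
        byte[] payload = "{\"name\":\"example\"}".getBytes();    // placeholder domain JSON
        try (S3Client s3 = S3Client.create()) {
            // v1: s3client.putObject(bucket, key, inputStream, objectMetadata)
            // v2: the request carries the metadata, the RequestBody carries the bytes
            PutObjectRequest request = PutObjectRequest.builder()
                    .bucket("example-bucket")                     // placeholder bucket
                    .key("example.domain")                        // placeholder key
                    .serverSideEncryption(ServerSideEncryption.AES256)
                    .build();
            s3.putObject(request, RequestBody.fromBytes(payload));
        }
    }
}
```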
@@ -18,15 +18,19 @@

package com.yahoo.athenz.zms_aws_domain_syncer;

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.http.SdkHttpClient;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.http.apache.ApacheHttpClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.S3Exception;

import java.time.Duration;

public class S3ClientFactory {

@@ -35,7 +39,7 @@ public class S3ClientFactory {
private static final int DEFAULT_CONN_TIMEOUT = 10000;
private static final int DEFAULT_REQ_TIMEOUT = 20000;

public static AmazonS3 getS3Client() throws Exception {
public static S3Client getS3Client() throws Exception {
// load up credentials
// use the system props if set for aws key id and secret, else use zts client
final String bucket = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_BUCKET);
@@ -44,15 +48,13 @@ public static AmazonS3 getS3Client() throws Exception {
LOGGER.error(errMsg);
throw new Exception(errMsg);
}
ClientConfiguration cltConf = new ClientConfiguration();
cltConf.setConnectionTimeout(DEFAULT_CONN_TIMEOUT);
cltConf.setRequestTimeout(DEFAULT_REQ_TIMEOUT);
long connectionTimeout = DEFAULT_CONN_TIMEOUT;
long requestTimeout = DEFAULT_REQ_TIMEOUT;

final String connTimeout = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_CONNECT_TIMEOUT);
if (!Config.isEmpty(connTimeout)) {
try {
int connectionTimeout = Integer.parseInt(connTimeout);
cltConf.setConnectionTimeout(connectionTimeout);
connectionTimeout = Long.parseLong(connTimeout);
LOGGER.debug("using connection timeout: {}", connectionTimeout);
} catch (Exception exc) {
LOGGER.error("ignore connection timeout parameter: {}, bad value: {}",
@@ -63,53 +65,70 @@ public static AmazonS3 getS3Client() throws Exception {
final String reqTimeout = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_REQUEST_TIMEOUT);
if (!Config.isEmpty(reqTimeout)) {
try {
int requestTimeout = Integer.parseInt(reqTimeout);
cltConf.setRequestTimeout(requestTimeout);
requestTimeout = Long.parseLong(reqTimeout);
LOGGER.debug("using request timeout: {}", requestTimeout);
} catch (Exception exc) {
LOGGER.error("ignore request timeout parameter: {}, bad value: {}",
Config.SYNC_CFG_PARAM_AWS_REQUEST_TIMEOUT, reqTimeout);
}
}

AmazonS3 s3client;
SdkHttpClient apacheHttpClient = ApacheHttpClient.builder()
.connectionTimeout(Duration.ofMillis(connectionTimeout))
.socketTimeout(Duration.ofMillis(requestTimeout))
.build();

S3Client s3client;
final String awsKeyId = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_KEY_ID);
final String awsAccKey = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_ACCESS_KEY);
if (!Config.isEmpty(awsKeyId) && !Config.isEmpty(awsAccKey)) {
BasicAWSCredentials awsCreds = new BasicAWSCredentials(awsKeyId, awsAccKey);
s3client = AmazonS3ClientBuilder.standard()
.withCredentials(new AWSStaticCredentialsProvider(awsCreds))
.withClientConfiguration(cltConf)
.withRegion(getRegion())
AwsBasicCredentials awsCreds = AwsBasicCredentials.builder()
.accessKeyId(awsKeyId).secretAccessKey(awsAccKey).build();
StaticCredentialsProvider credentialsProvider = StaticCredentialsProvider.create(awsCreds);

s3client = S3Client.builder()
.credentialsProvider(credentialsProvider)
.httpClient(apacheHttpClient)
.region(getRegion())
.build();
} else {
s3client = AmazonS3ClientBuilder.standard()
.withCredentials(new InstanceProfileCredentialsProvider(false))
.withClientConfiguration(cltConf)
s3client = S3Client.builder()
.httpClient(apacheHttpClient)
.region(getRegion())
.build();
}

if (!s3client.doesBucketExistV2(bucket)) {
String errMsg = "bucket: " + bucket + " : does not exist in S3";
LOGGER.error(errMsg);
throw new Exception(errMsg);
}
verifyBucketExist(s3client, bucket);

LOGGER.debug("success: using bucket: {}", bucket);
return s3client;
}

private static Regions getRegion() {
public static Region getRegion() {

final String awsRegion = Config.getInstance().getConfigParam(Config.SYNC_CFG_PARAM_AWS_S3_REGION);
Regions region;
if (Config.isEmpty(awsRegion)) {
region = Regions.US_WEST_2;
LOGGER.info("default to aws region: US_WEST_2");
} else {
region = Regions.fromName(awsRegion);
LOGGER.info("using aws region: {}", awsRegion);
if (awsRegion != null && !awsRegion.isEmpty()) {
return Region.of(awsRegion);
}
try {
DefaultAwsRegionProviderChain regionProvider = DefaultAwsRegionProviderChain.builder().build();
return regionProvider.getRegion();
} catch (Exception ex) {
LOGGER.error("Unable to determine AWS region", ex);
}
return Region.US_WEST_2;
}

public static void verifyBucketExist(S3Client s3Client, String bucketName) {
try {
HeadBucketRequest request = HeadBucketRequest.builder()
.bucket(bucketName)
.build();
s3Client.headBucket(request);
} catch (Exception ex) {
String errMsg = "bucket: " + bucketName + " : does not exist in S3";
LOGGER.error(errMsg, ex);
throw ex;
}
return region;
}
}
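SDK v2 has no doesBucketExistV2, so the factory now relies on headBucket. A minimal sketch of that check, under the assumption that only NoSuchBucketException should be treated as "missing" (bucket name is a placeholder):

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.HeadBucketRequest;
import software.amazon.awssdk.services.s3.model.NoSuchBucketException;

public class BucketCheckSketch {

    // returns false only when S3 reports the bucket does not exist;
    // other failures (e.g. access denied) propagate to the caller
    static boolean bucketExists(S3Client s3, String bucket) {
        try {
            s3.headBucket(HeadBucketRequest.builder().bucket(bucket).build());
            return true;
        } catch (NoSuchBucketException ex) {
            return false;
        }
    }

    public static void main(String[] args) {
        try (S3Client s3 = S3Client.create()) {
            System.out.println(bucketExists(s3, "example-bucket"));   // placeholder bucket
        }
    }
}
```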