build: migrate logger to SLF4J APIs + Logback backend #513

Merged · 7 commits · Nov 27, 2024
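The change is mechanical across the touched classes: `java.util.logging` declarations and `Level`-based calls are replaced with their SLF4J equivalents, and Logback becomes the runtime backend. A minimal before/after sketch of the pattern (the `Example` class is hypothetical, not part of the PR):

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Example {

    // Before: java.util.logging
    // private static final java.util.logging.Logger LOG =
    //         java.util.logging.Logger.getLogger(Example.class.getName());

    // After: SLF4J API, backed by Logback at runtime
    private static final Logger LOG = LoggerFactory.getLogger(Example.class);

    void load(String id, Exception ex) {
        // Before: LOG.log(Level.SEVERE, "Cannot load info for peer " + id, ex);
        // After: parameterized message; SEVERE maps to error(), INFO to info()
        LOG.error("Cannot load info for peer {}", id, ex);
    }
}
```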
31 changes: 25 additions & 6 deletions carapace-server/pom.xml
@@ -71,6 +71,13 @@
<groupId>software.amazon.awssdk</groupId>
<artifactId>route53</artifactId>
<version>${libs.awssdk}</version>
<exclusions>
<!-- Using jcl-over-slf4j instead -->
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.github.ben-manes.caffeine</groupId>
@@ -114,6 +121,13 @@
<groupId>org.apache.bookkeeper.stats</groupId>
<artifactId>prometheus-metrics-provider</artifactId>
<version>${libs.bookkeeper}</version>
<exclusions>
<!-- Using jcl-over-slf4j instead -->
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.micrometer</groupId>
@@ -139,6 +153,11 @@
<groupId>org.bouncycastle</groupId>
<artifactId>bc-fips</artifactId>
</exclusion>
<!-- Using jcl-over-slf4j instead -->
<exclusion>
<artifactId>commons-logging</artifactId>
<groupId>commons-logging</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
@@ -209,10 +228,15 @@
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-jdk14</artifactId>
<artifactId>jcl-over-slf4j</artifactId>
<version>${libs.slf4j}</version>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -318,11 +342,6 @@
<groupId>org.junit.jupiter</groupId>
<artifactId>*</artifactId>
</exclusion>
<!-- We want a single SLF4J backend in our classpath! -->
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
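With these POM changes, `commons-logging` is excluded from the transitive dependencies, `jcl-over-slf4j` re-implements the JCL API on top of SLF4J, and `logback-classic` replaces `slf4j-jdk14` as the single backend. A small smoke-test sketch of the routing (hypothetical class, assuming `jcl-over-slf4j` and `logback-classic` are both on the runtime classpath):

```java
import org.apache.commons.logging.LogFactory;
import org.slf4j.LoggerFactory;

public class LoggingBridgeSmokeTest {
    public static void main(String[] args) {
        // A call through the Commons Logging API is intercepted by jcl-over-slf4j...
        LogFactory.getLog(LoggingBridgeSmokeTest.class).info("via the JCL API");
        // ...and ends up in the same Logback backend as a direct SLF4J call.
        LoggerFactory.getLogger(LoggingBridgeSmokeTest.class).info("via the SLF4J API");
    }
}
```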
CertificatesResource.java
@@ -44,8 +44,6 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.servlet.ServletContext;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
@@ -75,6 +73,8 @@
import org.carapaceproxy.server.config.SSLCertificateConfiguration;
import org.carapaceproxy.server.config.SSLCertificateConfiguration.CertificateMode;
import org.carapaceproxy.utils.CertificatesUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Access to certificates
@@ -85,7 +85,7 @@
@Produces(MediaType.APPLICATION_JSON)
public class CertificatesResource {

private static final Logger LOG = Logger.getLogger(CertificatesResource.class.getName());
private static final Logger LOG = LoggerFactory.getLogger(CertificatesResource.class);

@Context
private ServletContext context;
@@ -210,7 +210,7 @@ private static void fillCertificateBean(
}
bean.setStatus(certificateStateToString(state));
} catch (GeneralSecurityException | IOException ex) {
LOG.log(Level.SEVERE, "Unable to read Keystore for certificate {0}. Reason: {1}", new Object[]{certificate.getId(), ex});
LOG.error("Unable to read Keystore for certificate {}", certificate.getId(), ex);
}
}

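One SLF4J detail worth noting in the rewritten call above: a `Throwable` passed as the last argument is logged as the exception (with its stack trace), not substituted into a `{}` placeholder. A hypothetical sketch:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableArgumentDemo {
    private static final Logger LOG = LoggerFactory.getLogger(ThrowableArgumentDemo.class);

    public static void main(String[] args) {
        Exception ex = new IllegalStateException("keystore unreadable");
        // "{}" binds to the certificate id; ex is not a placeholder value,
        // it is reported as the exception and its stack trace is printed.
        LOG.error("Unable to read Keystore for certificate {}", "cert-1", ex);
    }
}
```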
ConfigResource.java
@@ -24,8 +24,6 @@
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.servlet.ServletContext;
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
@@ -39,6 +37,8 @@
import org.carapaceproxy.core.HttpProxyServer;
import org.carapaceproxy.server.config.ConfigurationChangeInProgressException;
import org.carapaceproxy.server.config.ConfigurationNotValidException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Access to proxy cache
@@ -49,7 +49,7 @@
@Produces("application/json")
public class ConfigResource {

private static final Logger LOG = Logger.getLogger(ConfigResource.class.getName());
private static final Logger LOG = LoggerFactory.getLogger(ConfigResource.class);

@javax.ws.rs.core.Context
ServletContext context;
@@ -142,10 +142,10 @@ public ConfigurationChangeResult getMaintenanceStatus() {
private void dumpAndValidateInputConfiguration(PropertiesConfigurationStore simpleStore) throws ConfigurationNotValidException {
int[] count = new int[]{0};
simpleStore.forEach((k, v) -> {
LOG.log(Level.INFO, "{0} -> {1}", new Object[]{k, v});
LOG.info("{} -> {}", k, v);
count[0]++;
});
LOG.log(Level.INFO, "Number of entries: " + count[0]);
LOG.info("Number of entries: {}", count[0]);
if (count[0] == 0) {
throw new ConfigurationNotValidException("No entries in the new configuration ?");
}
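The ConfigResource changes also move from string concatenation to parameterized messages, so the message is only built when the level is actually enabled. A hypothetical sketch of the difference:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyFormattingDemo {
    private static final Logger LOG = LoggerFactory.getLogger(LazyFormattingDemo.class);

    public static void main(String[] args) {
        int entries = 42;
        // Concatenation builds the string even when INFO is disabled.
        LOG.info("Number of entries: " + entries);
        // The parameterized form defers formatting until the level check passes.
        LOG.info("Number of entries: {}", entries);
    }
}
```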
ZooKeeperGroupMembershipHandler.java
@@ -32,8 +32,6 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.apache.curator.ensemble.fixed.FixedEnsembleProvider;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
@@ -54,6 +52,8 @@
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.carapaceproxy.cluster.GroupMembershipHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Implementation based on ZooKeeper. This class is very simple, we are not expecting heavy traffic on ZooKeeper. We have two systems:
@@ -70,7 +70,7 @@ public class ZooKeeperGroupMembershipHandler implements GroupMembershipHandler,
public static final String PROPERTY_PEER_ADMIN_SERVER_PORT = "peer_admin_server_port"; // port of the Admin UI/API
public static final String PROPERTY_PEER_ADMIN_SERVER_HTTPS_PORT = "peer_admin_server_https_port"; // https port of the Admin UI/API

private static final Logger LOG = Logger.getLogger(ZooKeeperGroupMembershipHandler.class.getName());
private static final Logger LOG = LoggerFactory.getLogger(ZooKeeperGroupMembershipHandler.class);
private static final ObjectMapper MAPPER = new ObjectMapper();

private final CuratorFramework client;
@@ -101,10 +101,10 @@ public List<ACL> getAclForPath(String path) {
final ZKClientConfig zkClientConfig = new ZKClientConfig();
zkProperties.forEach((k, v) -> {
zkClientConfig.setProperty(k.toString(), v.toString());
LOG.log(Level.INFO, "Setting ZK client config: {0}={1}", new Object[]{k, v});
LOG.info("Setting ZK client config: {}={}", k, v);
});
ZookeeperFactory zkFactory = (String connect, int timeout, Watcher wtchr, boolean canBeReadOnly) -> {
LOG.log(Level.INFO, "Creating ZK client: {0}, timeout {1}, canBeReadOnly:{2}", new Object[]{connect, timeout, canBeReadOnly});
LOG.info("Creating ZK client: {}, timeout {}, canBeReadOnly:{}", connect, timeout, canBeReadOnly);
return new ZooKeeper(connect, timeout, wtchr, canBeReadOnly, zkClientConfig);
};

@@ -117,13 +117,13 @@ public List<ACL> getAclForPath(String path) {
.retryPolicy(new ExponentialBackoffRetry(1000, 2))
.ensembleProvider(new FixedEnsembleProvider(zkAddress, true))
.build();
LOG.log(Level.INFO, "Waiting for ZK client connection to be established");
LOG.info("Waiting for ZK client connection to be established");
try {
boolean ok = client.blockUntilConnected(zkTimeout, TimeUnit.MILLISECONDS);
if (!ok) {
LOG.log(Level.SEVERE, "First connection to ZK cannot be established");
LOG.error("First connection to ZK cannot be established");
} else {
LOG.log(Level.SEVERE, "First connection to ZK established with success");
LOG.error("First connection to ZK established with success");
}
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
@@ -173,7 +173,7 @@ public void storeLocalPeerInfo(Map<String, String> info) {
byte[] data = MAPPER.writeValueAsBytes(info);
client.setData().forPath(path, data);
} catch (Exception ex) {
LOG.log(Level.SEVERE, "Cannot store info for peer " + peerId, ex);
LOG.error("Cannot store info for peer {}", peerId, ex);
}
}

@@ -189,7 +189,7 @@ public Map<String, String> loadInfoForPeer(String id) {
}
}
} catch (Exception ex) {
LOG.log(Level.SEVERE, "Cannot load info for peer " + id, ex);
LOG.error("Cannot load info for peer {}", id, ex);
}
return null;
}
@@ -200,23 +200,23 @@ public void watchEvent(String eventId, EventCallback callback) {
final String path = "/proxy/events";
final String eventpath = "/proxy/events/" + eventId;

LOG.info("watching " + path);
LOG.info("watching {}", path);
PathChildrenCache cache = new PathChildrenCache(client, path, true);
// hold a strong reference to the PathChildrenCache
watchedEvents.add(cache);
cache.getListenable().addListener((PathChildrenCacheListener) (CuratorFramework cf, PathChildrenCacheEvent pcce) -> {
LOG.log(Level.INFO, "ZK event {0} at {1}", new Object[]{pcce, path});
LOG.info("ZK event {} at {}", pcce, path);
ChildData data = pcce.getData();
if (data != null && eventpath.equals(data.getPath())) {
byte[] content = data.getData();
LOG.log(Level.INFO, "ZK event content {0}", new Object[]{new String(content, StandardCharsets.UTF_8)});
LOG.info("ZK event content {}", new String(content, StandardCharsets.UTF_8));
if (content != null) {
Map<String, Object> info = MAPPER.readValue(new ByteArrayInputStream(content), Map.class);
String origin = info.remove("origin") + "";
if (peerId.equals(origin)) {
LOG.log(Level.INFO, "discard self originated event {0}", origin);
LOG.info("discard self originated event {}", origin);
} else {
LOG.log(Level.INFO, "handle event {0}", info);
LOG.info("handle event {}", info);
callback.eventFired(eventId, info);
}
}
@@ -244,7 +244,7 @@ public void fireEvent(String eventId, Map<String, Object> data) {
.withMode(CreateMode.PERSISTENT)
.forPath(path);
}
LOG.log(Level.INFO, "Fire event {0}", path);
LOG.info("Fire event {}", path);
Map<String, Object> info = new HashMap<>();
info.put("origin", peerId);
if (data != null) {
@@ -255,7 +255,7 @@
client.setData()
.forPath(path, content);
} catch (Exception ex) {
LOG.log(Level.SEVERE, "Cannot fire event " + eventId, ex);
LOG.error("Cannot fire event {}", eventId, ex);
}

}
@@ -265,7 +265,7 @@ public List<String> getPeers() {
try {
return client.getChildren().forPath("/proxy/peers");
} catch (Exception ex) {
LOG.log(Level.SEVERE, "Cannot list peers", ex);
LOG.error("Cannot list peers", ex);
return Collections.emptyList();
}
}
@@ -293,17 +293,17 @@ public void executeInMutex(String mutexId, int timeout, Runnable runnable) {
try {
boolean acquired = mutex.acquire(timeout, TimeUnit.SECONDS);
if (!acquired) {
LOG.log(Level.INFO, "Failed to acquire lock for executeInMutex (mutexId: " + mutexId + ", peerId: " + peerId + ")");
LOG.info("Failed to acquire lock for executeInMutex (mutexId: {}, peerId: {})", mutexId, peerId);
return;
}
runnable.run();
} catch (Exception e) {
LOG.log(Level.SEVERE, "Failed to acquire lock for executeInMutex (mutexId: " + mutexId + ", peerId: " + peerId + ")", e);
LOG.error("Failed to acquire lock for executeInMutex (mutexId: {}, peerId: {})", mutexId, peerId, e);
} finally {
try {
mutex.release();
} catch (Exception e) {
LOG.log(Level.SEVERE, "Failed to release lock for executeInMutex (mutexId: " + mutexId + ", peerId: " + peerId + ")", e);
LOG.error("Failed to release lock for executeInMutex (mutexId: {}, peerId: {})", mutexId, peerId, e);
}
}
}