Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Metadata Compression #241

Merged
merged 3 commits into from
Dec 13, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 22 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 3 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ crc16 = "0.4.0"
crc64 = "1.0.0"
caseless = "0.2.1"
arc-swap = "0.3.11"
reqwest = { version = "0.10.1", features = ["json"] }
reqwest = { version = "0.10.1", features = ["json", "gzip"] }
serde = "1.0"
serde_derive = "1.0.88"
serde_json = "1.0"
Expand Down Expand Up @@ -58,6 +58,8 @@ backtrace = "0.3"
jemallocator = "0.3.0"
async-trait = "0.1"
derivative = "2.1.1"
flate2 = "1"
base64 = "0.13.0"

[profile.release]
debug = true
Expand Down
2 changes: 2 additions & 0 deletions conf/coordinator.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,5 @@ address="127.0.0.1:6699"
broker_address = "127.0.0.1:7799"
reporter_id = "127.0.0.1:6699"
thread_number = 2
# Set this to true for large clusters
enable_compression = false
6 changes: 6 additions & 0 deletions src/bin/coordinator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -58,12 +58,17 @@ fn gen_conf() -> CoordinatorConfig {

let proxy_timeout = s.get::<usize>("proxy_timeout").unwrap_or_else(|_| 2);

let enable_compression = s
.get::<bool>("enable_compression")
.unwrap_or_else(|_| false);

CoordinatorConfig {
address,
broker_addresses: Arc::new(ArcSwap::new(Arc::new(broker_address_list))),
reporter_id,
thread_number,
proxy_timeout,
enable_compression,
}
}

Expand All @@ -74,6 +79,7 @@ fn gen_service(
let data_broker = Arc::new(HttpMetaBroker::new(
config.broker_addresses.clone(),
http_client.clone(),
config.enable_compression,
));
let mani_broker = Arc::new(HttpMetaManipulationBroker::new(
config.broker_addresses.clone(),
Expand Down
1 change: 1 addition & 0 deletions src/bin/mem_broker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -189,6 +189,7 @@ async fn main() -> std::io::Result<()> {
let service = service.clone();
App::new()
.wrap(middleware::Logger::default())
.wrap(middleware::Compress::default())
.configure(|cfg| configure_app(cfg, service.clone()))
})
.bind(&address)?
Expand Down
6 changes: 3 additions & 3 deletions src/common/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use std::collections::HashMap;
use std::str::FromStr;
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct ClusterConfig {
#[serde(default)]
pub compression_strategy: CompressionStrategy,
Expand Down Expand Up @@ -74,7 +74,7 @@ impl ClusterConfig {
}
}

#[derive(Debug, PartialEq, Clone, Copy)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CompressionStrategy {
Disabled = 0,
// Only allow SET, SETEX, PSETEX, SETNX, GET, GETSET , MGET, MSET, MSETNX commands for String data type
Expand Down Expand Up @@ -137,7 +137,7 @@ impl<'de> Deserialize<'de> for CompressionStrategy {
}
}

#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct MigrationConfig {
pub max_migration_time: u64,
pub max_blocking_time: u64,
Expand Down
149 changes: 144 additions & 5 deletions src/common/proto.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,12 @@ use crate::common::cluster::ClusterName;
use crate::common::config::ClusterConfig;
use crate::common::utils::extract_host_from_address;
use crate::protocol::{Array, BulkStr, Resp};
use flate2::Compression;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::io;
use std::io::Read;
use std::io::Write;
use std::iter::Peekable;
use std::str;

Expand All @@ -30,23 +34,89 @@ macro_rules! try_get {
/// Flags carried in the UMCTL SETCLUSTER flag argument.
/// `Eq` added alongside `PartialEq` for consistency with the other
/// plain-data types in this change set (e.g. `ClusterConfig`,
/// `CompressionStrategy`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ClusterMapFlags {
    // Set when the "FORCE" token is present in the flag argument.
    pub force: bool,
    // Set when the "COMPRESS" token is present; signals that the
    // metadata payload is compressed.
    pub compress: bool,
}

impl ClusterMapFlags {
pub fn to_arg(&self) -> String {
let mut flags = Vec::new();
if self.force {
"FORCE".to_string()
} else {
flags.push("FORCE");
}
if self.compress {
flags.push("COMPRESS");
}

if flags.is_empty() {
"NOFLAG".to_string()
} else {
flags.join(",")
}
}

pub fn from_arg(flags_str: &str) -> Self {
let force = has_flags(flags_str, ',', "FORCE");
ClusterMapFlags { force }
let compress = has_flags(flags_str, ',', "COMPRESS");
ClusterMapFlags { force, compress }
}
}

/// Serializable snapshot of a proxy's cluster metadata; this is the
/// payload that gets JSON-encoded, gzipped, and base64-encoded for the
/// compressed UMCTL SETCLUSTER form.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct ProxyClusterMetaData {
// Slot-range map for this proxy's own nodes — presumably; confirm against ProxyClusterMeta.
local: ProxyClusterMap,
// Slot-range map for peer proxies — TODO confirm semantics with callers.
peer: ProxyClusterMap,
// Per-cluster configuration keyed by cluster name.
clusters_config: ClusterConfigMap,
}

impl ProxyClusterMetaData {
    /// Constructs a snapshot from its three parts.
    pub fn new(
        local: ProxyClusterMap,
        peer: ProxyClusterMap,
        clusters_config: ClusterConfigMap,
    ) -> Self {
        Self {
            local,
            peer,
            clusters_config,
        }
    }

    /// Encodes the snapshot for transport: JSON -> gzip -> base64,
    /// returning a single printable string suitable as one command
    /// argument.
    pub fn gen_compressed_data(&self) -> Result<String, MetaCompressError> {
        let s = serde_json::to_string(self).map_err(|err| {
            error!("failed to encode json for meta: {:?}", err);
            MetaCompressError::Json
        })?;
        // Compression::fast() favors latency over ratio, which suits
        // frequent metadata pushes.
        let mut encoder = flate2::write::GzEncoder::new(Vec::new(), Compression::fast());
        encoder
            .write_all(s.as_bytes())
            .map_err(MetaCompressError::Io)?;
        let buf = encoder.finish().map_err(MetaCompressError::Io)?;
        Ok(base64::encode(&buf))
    }

    /// Inverse of `gen_compressed_data`: base64 decode -> gunzip ->
    /// JSON parse.
    ///
    /// Generalized to accept any byte-like input (`String`, `&str`,
    /// `&[u8]`, ...) instead of requiring an owned `String`; existing
    /// callers passing `String` are unaffected.
    pub fn from_compressed_data(data: impl AsRef<[u8]>) -> Result<Self, MetaCompressError> {
        let raw = base64::decode(data).map_err(|err| {
            error!("failed to decode base64 for meta: {:?}", err);
            MetaCompressError::Base64
        })?;
        let mut gz = flate2::read::GzDecoder::new(io::Cursor::new(raw));
        let mut s = String::new();
        gz.read_to_string(&mut s).map_err(MetaCompressError::Io)?;
        serde_json::from_str(&s).map_err(|err| {
            error!("failed to decode json for meta: {:?}", err);
            MetaCompressError::Json
        })
    }
}

/// Errors produced while compressing or decompressing cluster
/// metadata.
#[derive(Debug)]
pub enum MetaCompressError {
    /// Gzip read/write failure; carries the underlying I/O error.
    Io(io::Error),
    /// JSON (de)serialization failure; details are logged at the
    /// call site, so no payload is carried here.
    Json,
    /// The payload was not valid base64.
    Base64,
}

// Display + Error impls so this type composes with `?`, `Box<dyn
// Error>`, and logging like the rest of the codebase's errors.
// Purely additive: existing matches on the variants are unaffected.
impl std::fmt::Display for MetaCompressError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MetaCompressError::Io(err) => write!(f, "meta compression I/O error: {}", err),
            MetaCompressError::Json => write!(f, "meta compression JSON error"),
            MetaCompressError::Base64 => write!(f, "meta compression base64 error"),
        }
    }
}

impl std::error::Error for MetaCompressError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            MetaCompressError::Io(err) => Some(err),
            _ => None,
        }
    }
}

const PEER_PREFIX: &str = "PEER";
const CONFIG_PREFIX: &str = "CONFIG";

Expand Down Expand Up @@ -95,6 +165,14 @@ impl ProxyClusterMeta {
&self.clusters_config
}

pub fn gen_data(&self) -> ProxyClusterMetaData {
ProxyClusterMetaData {
local: self.local.clone(),
peer: self.peer.clone(),
clusters_config: self.clusters_config.clone(),
}
}

pub fn from_resp<T: AsRef<[u8]>>(
resp: &Resp<T>,
) -> Result<(Self, Result<(), ParseExtendedMetaError>), CmdParseError> {
Expand Down Expand Up @@ -127,6 +205,36 @@ impl ProxyClusterMeta {

let flags = ClusterMapFlags::from_arg(&try_get!(it.next()));

if flags.compress {
let compressed_data = it.next().ok_or_else(|| {
error!("failed to get compressed data for UMCTL SETCLUSTER");
CmdParseError {}
})?;
let data =
ProxyClusterMetaData::from_compressed_data(compressed_data).map_err(|err| {
error!(
"failed to parse compressed data for UMCTL SETCLUSTER: {:?}",
err
);
CmdParseError {}
})?;
let ProxyClusterMetaData {
local,
peer,
clusters_config,
} = data;
return Ok((
Self {
epoch,
flags,
local,
peer,
clusters_config,
},
Ok(()),
));
}

let local = ProxyClusterMap::parse(it)?;
let mut peer = ProxyClusterMap::new(HashMap::new());
let mut clusters_config = ClusterConfigMap::default();
Expand Down Expand Up @@ -178,9 +286,20 @@ impl ProxyClusterMeta {
}
args
}

pub fn to_compressed_args(&self) -> Result<Vec<String>, MetaCompressError> {
let data = ProxyClusterMetaData::new(
self.local.clone(),
self.peer.clone(),
self.clusters_config.clone(),
)
.gen_compressed_data()?;
let args = vec![self.epoch.to_string(), self.flags.to_arg(), data];
Ok(args)
}
}

#[derive(Debug, Clone)]
/// Map of cluster name -> (string key -> slot ranges); the inner key
/// is presumably a node address — TODO confirm against callers.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct ProxyClusterMap {
cluster_map: HashMap<ClusterName, HashMap<String, Vec<SlotRange>>>,
}
Expand Down Expand Up @@ -280,7 +399,7 @@ impl ProxyClusterMap {
}
}

#[derive(Debug, Clone)]
/// Per-cluster configuration table, keyed by cluster name.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
pub struct ClusterConfigMap {
config_map: HashMap<ClusterName, ClusterConfig>,
}
Expand Down Expand Up @@ -802,6 +921,11 @@ mod tests {
args.sort();
cluster_args.sort();
assert_eq!(args, cluster_args);

let metadata = cluster_meta.gen_data();
let d = metadata.gen_compressed_data().unwrap();
let metadata2 = ProxyClusterMetaData::from_compressed_data(d).unwrap();
assert_eq!(metadata, metadata2);
}

#[test]
Expand Down Expand Up @@ -835,6 +959,11 @@ mod tests {
.compression_strategy,
CompressionStrategy::SetGetOnly
);

let metadata = cluster_meta.gen_data();
let d = metadata.gen_compressed_data().unwrap();
let metadata2 = ProxyClusterMetaData::from_compressed_data(d).unwrap();
assert_eq!(metadata, metadata2);
}

#[test]
Expand Down Expand Up @@ -866,6 +995,11 @@ mod tests {
assert!(extended_res.is_err());
assert_eq!(cluster_meta.epoch, 233);
assert!(cluster_meta.flags.force);

let metadata = cluster_meta.gen_data();
let d = metadata.gen_compressed_data().unwrap();
let metadata2 = ProxyClusterMetaData::from_compressed_data(d).unwrap();
assert_eq!(metadata, metadata2);
}

#[test]
Expand Down Expand Up @@ -897,6 +1031,11 @@ mod tests {
assert!(extended_res.is_err());
assert_eq!(cluster_meta.epoch, 233);
assert!(cluster_meta.flags.force);

let metadata = cluster_meta.gen_data();
let d = metadata.gen_compressed_data().unwrap();
let metadata2 = ProxyClusterMetaData::from_compressed_data(d).unwrap();
assert_eq!(metadata, metadata2);
}

#[test]
Expand Down
Loading