diff --git a/tfhe/src/high_level_api/booleans/inner.rs b/tfhe/src/high_level_api/booleans/inner.rs
index a9b40226df..669675edaf 100644
--- a/tfhe/src/high_level_api/booleans/inner.rs
+++ b/tfhe/src/high_level_api/booleans/inner.rs
@@ -168,18 +168,6 @@ impl InnerBoolean {
         }
     }
 
-    /// Returns the inner cpu ciphertext if self is on the CPU, otherwise, returns a copy
-    /// that is on the CPU
-    pub(crate) fn into_cpu(self) -> BooleanBlock {
-        match self {
-            Self::Cpu(ct) => ct,
-            #[cfg(feature = "gpu")]
-            Self::Cuda(ct) => {
-                with_thread_local_cuda_streams(|streams| ct.to_boolean_block(streams))
-            }
-        }
-    }
-
     #[cfg(feature = "gpu")]
     pub(crate) fn into_gpu(
         self,
diff --git a/tfhe/src/high_level_api/compressed_ciphertext_list.rs b/tfhe/src/high_level_api/compressed_ciphertext_list.rs
index 5acca55e66..61eb137f3d 100644
--- a/tfhe/src/high_level_api/compressed_ciphertext_list.rs
+++ b/tfhe/src/high_level_api/compressed_ciphertext_list.rs
@@ -3,44 +3,73 @@ use tfhe_versionable::{Unversionize, UnversionizeError, Versionize, VersionizeOwned};
 use super::keys::InternalServerKey;
 use crate::backward_compatibility::compressed_ciphertext_list::CompressedCiphertextListVersions;
 use crate::core_crypto::commons::math::random::{Deserialize, Serialize};
+use crate::high_level_api::booleans::InnerBoolean;
+use crate::high_level_api::errors::UninitializedServerKey;
 #[cfg(feature = "gpu")]
 use crate::high_level_api::global_state::with_thread_local_cuda_streams;
 use crate::high_level_api::integers::{FheIntId, FheUintId};
-use crate::integer::ciphertext::{Compressible, DataKind, Expandable};
+use crate::integer::ciphertext::{DataKind, Expandable};
 #[cfg(feature = "gpu")]
 use crate::integer::gpu::ciphertext::compressed_ciphertext_list::{
-    CudaCompressible, CudaExpandable,
+    CudaCompressedCiphertextList, CudaExpandable,
 };
+#[cfg(feature = "gpu")]
+use crate::integer::gpu::ciphertext::CudaRadixCiphertext;
 use crate::named::Named;
 use crate::prelude::{CiphertextList, Tagged};
 use crate::shortint::Ciphertext;
 use crate::{FheBool, FheInt, FheUint, Tag};
 
-impl<Id: FheUintId> Compressible for FheUint<Id> {
-    fn compress_into(self, messages: &mut Vec<Ciphertext>) -> DataKind {
-        self.ciphertext.into_cpu().compress_into(messages)
+impl<Id: FheUintId> HlCompressible for FheUint<Id> {
+    fn compress_into(self, messages: &mut Vec<(ToBeCompressed, DataKind)>) {
+        match self.ciphertext {
+            crate::high_level_api::integers::unsigned::RadixCiphertext::Cpu(cpu_radix) => {
+                let blocks = cpu_radix.blocks;
+                let kind = DataKind::Unsigned(blocks.len());
+                messages.push((ToBeCompressed::Cpu(blocks), kind));
+            }
+            #[cfg(feature = "gpu")]
+            crate::high_level_api::integers::unsigned::RadixCiphertext::Cuda(gpu_radix) => {
+                let blocks = gpu_radix.ciphertext;
+                let kind = DataKind::Unsigned(blocks.info.blocks.len());
+                messages.push((ToBeCompressed::Cuda(blocks), kind));
+            }
+        }
     }
 }
-
-impl<Id: FheIntId> Compressible for FheInt<Id> {
-    fn compress_into(self, messages: &mut Vec<Ciphertext>) -> DataKind {
-        self.ciphertext.into_cpu().compress_into(messages)
+impl<Id: FheIntId> HlCompressible for FheInt<Id> {
+    fn compress_into(self, messages: &mut Vec<(ToBeCompressed, DataKind)>) {
+        match self.ciphertext {
+            crate::high_level_api::integers::signed::RadixCiphertext::Cpu(cpu_radix) => {
+                let blocks = cpu_radix.blocks;
+                let kind = DataKind::Signed(blocks.len());
+                messages.push((ToBeCompressed::Cpu(blocks), kind));
+            }
+            #[cfg(feature = "gpu")]
+            crate::high_level_api::integers::signed::RadixCiphertext::Cuda(gpu_radix) => {
+                let blocks = gpu_radix.ciphertext;
+                let kind = DataKind::Signed(blocks.info.blocks.len());
+                messages.push((ToBeCompressed::Cuda(blocks), kind));
+            }
+        }
     }
 }
-
-impl Compressible for FheBool {
-    fn compress_into(self, messages: &mut Vec<Ciphertext>) -> DataKind {
-        self.ciphertext.into_cpu().compress_into(messages)
+impl HlCompressible for FheBool {
+    fn compress_into(self, messages: &mut Vec<(ToBeCompressed, DataKind)>) {
+        match self.ciphertext {
+            InnerBoolean::Cpu(cpu_bool) => {
+                let kind = DataKind::Boolean;
+                messages.push((ToBeCompressed::Cpu(vec![cpu_bool.0]), kind));
+            }
+            #[cfg(feature = "gpu")]
+            InnerBoolean::Cuda(cuda_bool) => {
+                let kind = DataKind::Boolean;
+                messages.push((ToBeCompressed::Cuda(cuda_bool.0.ciphertext), kind));
+            }
+        }
     }
 }
 
-impl<Id: FheUintId> HlCompressible for FheUint<Id> {}
-impl<Id: FheIntId> HlCompressible for FheInt<Id> {}
-impl HlCompressible for FheBool {}
-
-#[cfg(not(feature = "gpu"))]
-pub trait HlCompressible: Compressible {}
-
 impl<Id: FheUintId> HlExpandable for FheUint<Id> {}
 impl<Id: FheIntId> HlExpandable for FheInt<Id> {}
 impl HlExpandable for FheBool {}
@@ -49,46 +78,32 @@ impl HlExpandable for FheBool {}
 pub trait HlExpandable: Expandable {}
 #[cfg(feature = "gpu")]
 pub trait HlExpandable: Expandable + CudaExpandable {}
-#[cfg(feature = "gpu")]
-pub trait HlCompressible: Compressible + CudaCompressible {}
 
-#[allow(dead_code)]
-enum InnerBuilder {
-    Cpu(crate::integer::ciphertext::CompressedCiphertextListBuilder),
+pub trait HlCompressible {
+    fn compress_into(self, messages: &mut Vec<(ToBeCompressed, DataKind)>);
+}
+
+pub enum ToBeCompressed {
+    Cpu(Vec<Ciphertext>),
     #[cfg(feature = "gpu")]
-    Cuda(crate::integer::gpu::ciphertext::compressed_ciphertext_list::CudaCompressedCiphertextListBuilder),
+    Cuda(CudaRadixCiphertext),
 }
 
 pub struct CompressedCiphertextListBuilder {
-    inner: InnerBuilder,
+    inner: Vec<(ToBeCompressed, DataKind)>,
 }
 
 impl CompressedCiphertextListBuilder {
     #[allow(clippy::new_without_default)]
     pub fn new() -> Self {
-        Self {
-            #[cfg(not(feature = "gpu"))]
-            inner: InnerBuilder::Cpu(
-                crate::integer::ciphertext::CompressedCiphertextListBuilder::new(),
-            ),
-            #[cfg(feature = "gpu")]
-            inner: InnerBuilder::Cuda(crate::integer::gpu::ciphertext::compressed_ciphertext_list::CudaCompressedCiphertextListBuilder::new()),
-        }
+        Self { inner: vec![] }
    }
 
     pub fn push<T>(&mut self, value: T) -> &mut Self
     where
         T: HlCompressible,
     {
-        match &mut self.inner {
-            InnerBuilder::Cpu(inner) => {
-                inner.push(value);
-            }
-            #[cfg(feature = "gpu")]
-            InnerBuilder::Cuda(inner) => {
-                with_thread_local_cuda_streams(|streams| inner.push(value, streams));
-            }
-        }
+        value.compress_into(&mut self.inner);
         self
     }
 
@@ -96,62 +111,98 @@ impl CompressedCiphertextListBuilder {
     where
         T: HlCompressible,
     {
-        match &mut self.inner {
-            InnerBuilder::Cpu(inner) => {
-                inner.extend(values);
-            }
-            #[cfg(feature = "gpu")]
-            InnerBuilder::Cuda(inner) => {
-                with_thread_local_cuda_streams(|streams| inner.extend(values, streams));
-            }
+        for value in values {
+            self.push(value);
         }
         self
     }
 
     pub fn build(&self) -> crate::Result<CompressedCiphertextList> {
-        match &self.inner {
-            InnerBuilder::Cpu(inner) => {
-                crate::high_level_api::global_state::try_with_internal_keys(|keys| match keys {
-                    Some(InternalServerKey::Cpu(cpu_key)) => cpu_key
-                        .key
-                        .compression_key
-                        .as_ref()
-                        .ok_or_else(|| {
-                            crate::Error::new("Compression key not set in server key".to_owned())
-                        })
-                        .map(|compression_key| CompressedCiphertextList {
-                            inner: InnerCompressedCiphertextList::Cpu(inner.build(compression_key)),
+        crate::high_level_api::global_state::try_with_internal_keys(|keys| match keys {
+            Some(InternalServerKey::Cpu(cpu_key)) => {
+                let mut flat_cpu_blocks = vec![];
+                for (element, _) in &self.inner {
+                    match element {
+                        ToBeCompressed::Cpu(cpu_blocks) => {
+                            flat_cpu_blocks.extend_from_slice(cpu_blocks.as_slice());
+                        }
+                        #[cfg(feature = "gpu")]
+                        ToBeCompressed::Cuda(cuda_radix) => {
+                            with_thread_local_cuda_streams(|streams| {
+                                flat_cpu_blocks.append(&mut cuda_radix.to_cpu_blocks(streams));
+                            });
+                        }
+                    }
+                }
+                cpu_key
+                    .key
+                    .compression_key
+                    .as_ref()
+                    .ok_or_else(|| {
+                        crate::Error::new("Compression key not set in server key".to_owned())
+                    })
+                    .map(|compression_key| {
+                        let compressed_list = compression_key
+                            .key
+                            .compress_ciphertexts_into_list(&flat_cpu_blocks);
+                        let info = self.inner.iter().map(|(_, kind)| *kind).collect();
+
+                        CompressedCiphertextList {
+                            inner: InnerCompressedCiphertextList::Cpu(
+                                crate::integer::ciphertext::CompressedCiphertextList {
+                                    packed_list: compressed_list,
+                                    info,
+                                },
+                            ),
                             tag: cpu_key.tag.clone(),
-                        }),
-                    _ => Err(crate::Error::new(
-                        "A Cpu server key is needed to be set to use compression".to_owned(),
-                    )),
-                })
+                        }
+                    })
             }
             #[cfg(feature = "gpu")]
-            InnerBuilder::Cuda(inner) => {
-                crate::high_level_api::global_state::try_with_internal_keys(|keys| match keys {
-                    Some(InternalServerKey::Cuda(cuda_key)) => cuda_key
-                        .key
-                        .compression_key
-                        .as_ref()
-                        .ok_or_else(|| {
-                            crate::Error::new("Compression key not set in server key".to_owned())
-                        })
-                        .map(|compression_key| CompressedCiphertextList {
-                            inner: with_thread_local_cuda_streams(|streams| {
-                                InnerCompressedCiphertextList::Cuda(
-                                    inner.build(compression_key, streams),
-                                )
-                            }),
+            Some(InternalServerKey::Cuda(cuda_key)) => {
+                let mut cuda_radixes = vec![];
+                for (element, _) in &self.inner {
+                    match element {
+                        ToBeCompressed::Cpu(cpu_blocks) => {
+                            with_thread_local_cuda_streams(|streams| {
+                                cuda_radixes.push(CudaRadixCiphertext::from_cpu_blocks(
+                                    cpu_blocks, streams,
+                                ));
+                            })
+                        }
+                        #[cfg(feature = "gpu")]
+                        ToBeCompressed::Cuda(cuda_radix) => {
+                            with_thread_local_cuda_streams(|streams| {
+                                cuda_radixes.push(cuda_radix.duplicate(streams));
+                            });
+                        }
+                    }
+                }
+
+                cuda_key
+                    .key
+                    .compression_key
+                    .as_ref()
+                    .ok_or_else(|| {
+                        crate::Error::new("Compression key not set in server key".to_owned())
+                    })
+                    .map(|compression_key| {
+                        let packed_list = with_thread_local_cuda_streams(|streams| {
+                            compression_key
+                                .compress_ciphertexts_into_list(cuda_radixes.as_slice(), streams)
+                        });
+                        let info = self.inner.iter().map(|(_, kind)| *kind).collect();
+
+                        let compressed_list = CudaCompressedCiphertextList { packed_list, info };
+
+                        CompressedCiphertextList {
+                            inner: InnerCompressedCiphertextList::Cuda(compressed_list),
                             tag: cuda_key.tag.clone(),
-                        }),
-                    _ => Err(crate::Error::new(
-                        "A Cuda server key is needed to be set to use compression".to_owned(),
-                    )),
-                })
+                        }
+                    })
             }
-        }
+            None => Err(UninitializedServerKey.into()),
+        })
     }
 }
 
@@ -515,12 +566,12 @@ mod tests {
     use crate::shortint::parameters::list_compression::COMP_PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
     use crate::shortint::parameters::PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64;
     use crate::{
-        set_server_key, CompressedCiphertextList, CompressedCiphertextListBuilder, FheBool,
-        FheInt64, FheUint16, FheUint2, FheUint32,
+        set_server_key, ClientKey, CompressedCiphertextList, CompressedCiphertextListBuilder,
+        FheBool, FheInt64, FheUint16, FheUint2, FheUint32,
     };
 
     #[test]
-    fn test_compressed_ct_list() {
+    fn test_compressed_ct_list_cpu_gpu() {
        let config = crate::ConfigBuilder::with_custom_parameters(
            PARAM_MESSAGE_2_CARRY_2_KS_PBS_TUNIFORM_2M64,
        )
@@ -528,54 +579,89 @@ mod tests {
         .build();
 
         let ck = crate::ClientKey::generate(config);
-        let sk = crate::ServerKey::new(&ck);
-
-        set_server_key(sk);
-
-        let ct1 = FheUint32::encrypt(17_u32, &ck);
-
-        let ct2 = FheInt64::encrypt(-1i64, &ck);
+        let sk = crate::CompressedServerKey::new(&ck);
 
-        let ct3 = FheBool::encrypt(false, &ck);
-
-        let ct4 = FheUint2::encrypt(3u8, &ck);
+        // Test with input data being on CPU
+        {
+            let ct1 = FheUint32::encrypt(17_u32, &ck);
+            let ct2 = FheInt64::encrypt(-1i64, &ck);
+            let ct3 = FheBool::encrypt(false, &ck);
+            let ct4 = FheUint2::encrypt(3u8, &ck);
 
-        let compressed_list = CompressedCiphertextListBuilder::new()
-            .push(ct1)
-            .push(ct2)
-            .push(ct3)
-            .push(ct4)
-            .build()
-            .unwrap();
+            let mut compressed_list_builder = CompressedCiphertextListBuilder::new();
+            compressed_list_builder
+                .push(ct1)
+                .push(ct2)
+                .push(ct3)
+                .push(ct4);
 
-        let serialized = bincode::serialize(&compressed_list).unwrap();
+            set_server_key(sk.decompress());
+            check_is_correct(&compressed_list_builder.build().unwrap(), &ck);
 
-        let compressed_list: CompressedCiphertextList = bincode::deserialize(&serialized).unwrap();
+            #[cfg(feature = "gpu")]
+            {
+                set_server_key(sk.decompress_to_gpu());
+                check_is_correct(&compressed_list_builder.build().unwrap(), &ck);
+            }
+        }
 
+        // Test with input data being on GPU
+        #[cfg(feature = "gpu")]
         {
-            let a: FheUint32 = compressed_list.get(0).unwrap().unwrap();
-            let b: FheInt64 = compressed_list.get(1).unwrap().unwrap();
-            let c: FheBool = compressed_list.get(2).unwrap().unwrap();
-            let d: FheUint2 = compressed_list.get(3).unwrap().unwrap();
-
-            let a: u32 = a.decrypt(&ck);
-            assert_eq!(a, 17);
-            let b: i64 = b.decrypt(&ck);
-            assert_eq!(b, -1);
-            let c = c.decrypt(&ck);
-            assert!(!c);
-            let d: u8 = d.decrypt(&ck);
-            assert_eq!(d, 3);
-
-            assert!(compressed_list.get::<FheBool>(4).unwrap().is_none());
+            let mut ct1 = FheUint32::encrypt(17_u32, &ck);
+            let mut ct2 = FheInt64::encrypt(-1i64, &ck);
+            let mut ct3 = FheBool::encrypt(false, &ck);
+            let mut ct4 = FheUint2::encrypt(3u8, &ck);
+
+            ct1.move_to_device(crate::Device::CudaGpu);
+            ct2.move_to_device(crate::Device::CudaGpu);
+            ct3.move_to_device(crate::Device::CudaGpu);
+            ct4.move_to_device(crate::Device::CudaGpu);
+
+            let mut compressed_list_builder = CompressedCiphertextListBuilder::new();
+            compressed_list_builder
+                .push(ct1)
+                .push(ct2)
+                .push(ct3)
+                .push(ct4);
+
+            set_server_key(sk.decompress());
+            check_is_correct(&compressed_list_builder.build().unwrap(), &ck);
+
+            set_server_key(sk.decompress_to_gpu());
+            check_is_correct(&compressed_list_builder.build().unwrap(), &ck);
         }
 
-        {
-            // Incorrect type
-            assert!(compressed_list.get::<FheInt64>(0).is_err());
+        fn check_is_correct(compressed_list: &CompressedCiphertextList, ck: &ClientKey) {
+            let serialized = bincode::serialize(&compressed_list).unwrap();
+
+            let compressed_list: CompressedCiphertextList =
+                bincode::deserialize(&serialized).unwrap();
+            {
+                let a: FheUint32 = compressed_list.get(0).unwrap().unwrap();
+                let b: FheInt64 = compressed_list.get(1).unwrap().unwrap();
+                let c: FheBool = compressed_list.get(2).unwrap().unwrap();
+                let d: FheUint2 = compressed_list.get(3).unwrap().unwrap();
+
+                let a: u32 = a.decrypt(ck);
+                assert_eq!(a, 17);
+                let b: i64 = b.decrypt(ck);
+                assert_eq!(b, -1);
+                let c = c.decrypt(ck);
+                assert!(!c);
+                let d: u8 = d.decrypt(ck);
+                assert_eq!(d, 3);
+
+                assert!(compressed_list.get::<FheBool>(4).unwrap().is_none());
+            }
 
-            // Correct type but wrong number of bits
-            assert!(compressed_list.get::<FheUint16>(0).is_err());
+            {
+                // Incorrect type
+                assert!(compressed_list.get::<FheInt64>(0).is_err());
+
+                // Correct type but wrong number of bits
+                assert!(compressed_list.get::<FheUint16>(0).is_err());
+            }
         }
     }
 }
diff --git a/tfhe/src/integer/gpu/ciphertext/compressed_ciphertext_list.rs b/tfhe/src/integer/gpu/ciphertext/compressed_ciphertext_list.rs
index a33195efaa..ef4f88e13f 100644
--- a/tfhe/src/integer/gpu/ciphertext/compressed_ciphertext_list.rs
+++ b/tfhe/src/integer/gpu/ciphertext/compressed_ciphertext_list.rs
@@ -62,7 +62,7 @@ impl CudaExpandable for CudaBooleanBlock {
 }
 pub struct CudaCompressedCiphertextList {
     pub(crate) packed_list: CudaPackedGlweCiphertext,
-    info: Vec<DataKind>,
+    pub(crate) info: Vec<DataKind>,
 }
 
 impl CudaCompressedCiphertextList {
diff --git a/tfhe/src/integer/gpu/ciphertext/mod.rs b/tfhe/src/integer/gpu/ciphertext/mod.rs
index ed799d20a5..be9707e7de 100644
--- a/tfhe/src/integer/gpu/ciphertext/mod.rs
+++ b/tfhe/src/integer/gpu/ciphertext/mod.rs
@@ -7,7 +7,7 @@ use crate::core_crypto::gpu::vec::CudaVec;
 use crate::core_crypto::gpu::CudaStreams;
 use crate::core_crypto::prelude::{LweCiphertextList, LweCiphertextOwned};
 use crate::integer::gpu::ciphertext::info::{CudaBlockInfo, CudaRadixCiphertextInfo};
-use crate::integer::{RadixCiphertext, SignedRadixCiphertext};
+use crate::integer::{IntegerCiphertext, RadixCiphertext, SignedRadixCiphertext};
 use crate::shortint::Ciphertext;
 
 pub trait CudaIntegerRadixCiphertext: Sized {
@@ -96,6 +96,60 @@ impl CudaIntegerRadixCiphertext for CudaSignedRadixCiphertext {
     }
 }
 
+impl CudaRadixCiphertext {
+    pub fn from_cpu_blocks(blocks: &[Ciphertext], streams: &CudaStreams) -> Self {
+        let mut h_radix_ciphertext = blocks
+            .iter()
+            .flat_map(|block| block.ct.clone().into_container())
+            .collect::<Vec<_>>();
+
+        let lwe_size = blocks.first().unwrap().ct.lwe_size();
+        let ciphertext_modulus = blocks.first().unwrap().ct.ciphertext_modulus();
+
+        let h_ct = LweCiphertextList::from_container(
+            h_radix_ciphertext.as_mut_slice(),
+            lwe_size,
+            ciphertext_modulus,
+        );
+        let d_blocks = CudaLweCiphertextList::from_lwe_ciphertext_list(&h_ct, streams);
+
+        let info = CudaRadixCiphertextInfo {
+            blocks: blocks
+                .iter()
+                .map(|block| CudaBlockInfo {
+                    degree: block.degree,
+                    message_modulus: block.message_modulus,
+                    carry_modulus: block.carry_modulus,
+                    pbs_order: block.pbs_order,
+                    noise_level: block.noise_level(),
+                })
+                .collect(),
+        };
+
+        Self { d_blocks, info }
+    }
+
+    pub fn to_cpu_blocks(&self, streams: &CudaStreams) -> Vec<Ciphertext> {
+        let h_lwe_ciphertext_list = self.d_blocks.to_lwe_ciphertext_list(streams);
+        let ciphertext_modulus = h_lwe_ciphertext_list.ciphertext_modulus();
+        let lwe_size = h_lwe_ciphertext_list.lwe_size().0;
+
+        h_lwe_ciphertext_list
+            .into_container()
+            .chunks(lwe_size)
+            .zip(&self.info.blocks)
+            .map(|(data, i)| Ciphertext {
+                ct: LweCiphertextOwned::from_container(data.to_vec(), ciphertext_modulus),
+                degree: i.degree,
+                noise_level: i.noise_level,
+                message_modulus: i.message_modulus,
+                carry_modulus: i.carry_modulus,
+                pbs_order: i.pbs_order,
+            })
+            .collect()
+    }
+}
+
 impl CudaUnsignedRadixCiphertext {
     pub fn new(d_blocks: CudaLweCiphertextList<u64>, info: CudaRadixCiphertextInfo) -> Self {
         Self {
@@ -134,38 +188,8 @@ impl CudaUnsignedRadixCiphertext {
     /// assert_eq!(h_ctxt, ctxt);
     /// ```
     pub fn from_radix_ciphertext(radix: &RadixCiphertext, streams: &CudaStreams) -> Self {
-        let mut h_radix_ciphertext = radix
-            .blocks
-            .iter()
-            .flat_map(|block| block.ct.clone().into_container())
-            .collect::<Vec<_>>();
-
-        let lwe_size = radix.blocks.first().unwrap().ct.lwe_size();
-        let ciphertext_modulus = radix.blocks.first().unwrap().ct.ciphertext_modulus();
-
-        let h_ct = LweCiphertextList::from_container(
-            h_radix_ciphertext.as_mut_slice(),
-            lwe_size,
-            ciphertext_modulus,
-        );
-        let d_blocks = CudaLweCiphertextList::from_lwe_ciphertext_list(&h_ct, streams);
-
-        let info = CudaRadixCiphertextInfo {
-            blocks: radix
-                .blocks
-                .iter()
-                .map(|block| CudaBlockInfo {
-                    degree: block.degree,
-                    message_modulus: block.message_modulus,
-                    carry_modulus: block.carry_modulus,
-                    pbs_order: block.pbs_order,
-                    noise_level: block.noise_level(),
-                })
-                .collect(),
-        };
-
         Self {
-            ciphertext: CudaRadixCiphertext { d_blocks, info },
+            ciphertext: CudaRadixCiphertext::from_cpu_blocks(radix.blocks(), streams),
         }
     }
 
@@ -228,25 +252,7 @@ impl CudaUnsignedRadixCiphertext {
     /// assert_eq!(msg1, msg2);
     /// ```
     pub fn to_radix_ciphertext(&self, streams: &CudaStreams) -> RadixCiphertext {
-        let h_lwe_ciphertext_list = self.ciphertext.d_blocks.to_lwe_ciphertext_list(streams);
-        let ciphertext_modulus = h_lwe_ciphertext_list.ciphertext_modulus();
-        let lwe_size = h_lwe_ciphertext_list.lwe_size().0;
-
-        let h_blocks: Vec<Ciphertext> = h_lwe_ciphertext_list
-            .into_container()
-            .chunks(lwe_size)
-            .zip(&self.ciphertext.info.blocks)
-            .map(|(data, i)| Ciphertext {
-                ct: LweCiphertextOwned::from_container(data.to_vec(), ciphertext_modulus),
-                degree: i.degree,
-                noise_level: i.noise_level,
-                message_modulus: i.message_modulus,
-                carry_modulus: i.carry_modulus,
-                pbs_order: i.pbs_order,
-            })
-            .collect();
-
-        RadixCiphertext::from(h_blocks)
+        RadixCiphertext::from(self.ciphertext.to_cpu_blocks(streams))
     }
 }
 
@@ -291,38 +297,8 @@ impl CudaSignedRadixCiphertext {
         radix: &SignedRadixCiphertext,
         streams: &CudaStreams,
     ) -> Self {
-        let mut h_radix_ciphertext = radix
-            .blocks
-            .iter()
-            .flat_map(|block| block.ct.clone().into_container())
-            .collect::<Vec<_>>();
-
-        let lwe_size = radix.blocks.first().unwrap().ct.lwe_size();
-        let ciphertext_modulus = radix.blocks.first().unwrap().ct.ciphertext_modulus();
-
-        let h_ct = LweCiphertextList::from_container(
-            h_radix_ciphertext.as_mut_slice(),
-            lwe_size,
-            ciphertext_modulus,
-        );
-        let d_blocks = CudaLweCiphertextList::from_lwe_ciphertext_list(&h_ct, streams);
-
-        let info = CudaRadixCiphertextInfo {
-            blocks: radix
-                .blocks
-                .iter()
-                .map(|block| CudaBlockInfo {
-                    degree: block.degree,
-                    message_modulus: block.message_modulus,
-                    carry_modulus: block.carry_modulus,
-                    pbs_order: block.pbs_order,
-                    noise_level: block.noise_level(),
-                })
-                .collect(),
-        };
-
         Self {
-            ciphertext: CudaRadixCiphertext { d_blocks, info },
+            ciphertext: CudaRadixCiphertext::from_cpu_blocks(radix.blocks(), streams),
         }
     }
 
@@ -389,25 +365,7 @@ impl CudaSignedRadixCiphertext {
     /// assert_eq!(msg1, msg2);
     /// ```
     pub fn to_signed_radix_ciphertext(&self, streams: &CudaStreams) -> SignedRadixCiphertext {
-        let h_lwe_ciphertext_list = self.ciphertext.d_blocks.to_lwe_ciphertext_list(streams);
-        let ciphertext_modulus = h_lwe_ciphertext_list.ciphertext_modulus();
-        let lwe_size = h_lwe_ciphertext_list.lwe_size().0;
-
-        let h_blocks: Vec<Ciphertext> = h_lwe_ciphertext_list
-            .into_container()
-            .chunks(lwe_size)
-            .zip(&self.ciphertext.info.blocks)
-            .map(|(data, i)| Ciphertext {
-                ct: LweCiphertextOwned::from_container(data.to_vec(), ciphertext_modulus),
-                degree: i.degree,
-                noise_level: i.noise_level,
-                message_modulus: i.message_modulus,
-                carry_modulus: i.carry_modulus,
-                pbs_order: i.pbs_order,
-            })
-            .collect();
-
-        SignedRadixCiphertext::from(h_blocks)
+        SignedRadixCiphertext::from(self.ciphertext.to_cpu_blocks(streams))
     }
 }