Skip to content

Commit

Permalink
Merge branch 'main' into auto-scale
Browse files Browse the repository at this point in the history
  • Loading branch information
smtmfft authored Aug 14, 2024
2 parents a55a4e6 + 5526cc0 commit a883c4b
Show file tree
Hide file tree
Showing 7 changed files with 74 additions and 26 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/ci-all.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,13 @@ on:
- "host/**"
- "lib/**"
- "script/**"
- "!docs/**"
pull_request:
paths:
- "host/**"
- "lib/**"
- "script/**"
- "!docs/**"
merge_group:

env:
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/openapi-deploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ name: OpenAPI
on:
push:
branches: ["main"]
paths-ignore:
- "docs/**"
pull_request:
merge_group:

Expand Down
4 changes: 3 additions & 1 deletion docs/README_Docker_and_RA.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@ This tutorial was created to assist you in setting up Raiko and its SGX dependen

We recommend 4 cores and 8GB memory for running Raiko. 8 cores and 16GB memory is ideal; the bare minimum is 2 cores and 4GB memory (tentative).

We also recommend an EPC (Enclave memory) size of 4GB for mainnet, to prevent OOM errors. You can check your machine's EPC size by running `./script/check-epc-size.sh`.

## Prerequisites

Intel SGX is a technology that involves a considerable amount of configuration. Given its high level of configurability, the setup of your infrastructure may vary significantly depending on the attestation type (EPID, ECDSA) and other parameters. While we've strived to minimize the manual effort required to prepare the development environment, there are certain prerequisites that are challenging, if not impossible, to automate using Dockerfiles. This section outlines these prerequisites.
Expand Down Expand Up @@ -182,7 +184,7 @@ Currently Supported FMSPCs (on Hekla):
- 30606A000000
- 00706A100000

Please reach out to us in [discord](https://discord.com/invite/taikoxyz) channels if your machine doesn't have a listed FMSPC, if you've done the bootstrap process and obtained a quote we can try adding them to the On Chain RA process. We can't guarantee all FMSPCs will work, so you might have to switch machines.
Please reach out to us in [discord](https://discord.com/invite/taikoxyz) channels or create an issue on Github if your machine doesn't have a listed FMSPC. If you've done the bootstrap process and obtained a quote, we can try adding it to the On Chain RA process. We can't guarantee all FMSPCs will work, so you might have to switch machines. **Please include your FMSPC, CPU and your machine's EPC Size in the Github issue! This helps us decide whether the machine/FMSPC is a suitable candidate to add.**

> **_NOTE:_** At the moment, we are aware of three cloud providers who offer compatible SGX machines: [*Tencent Cloud*](https://www.tencentcloud.com/document/product/213/45510), Alibaba Cloud and Azure. (Tencent Cloud is one of our ecosystem partners!) Specifically, Tencent Cloud's `M6ce` model, Alibaba Cloud's `g7t` model support `SGX-FMSPC 00606A000000` and Azure's `confidential compute` machines support `SGX-FMSPC 00906ED50000`.
>
Expand Down
17 changes: 14 additions & 3 deletions host/src/server/api/v1/proof.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
use axum::{debug_handler, extract::State, routing::post, Json, Router};
use raiko_core::interfaces::ProofRequest;
use raiko_lib::prover::Proof;
use raiko_tasks::get_task_manager;
use serde_json::Value;
use utoipa::OpenApi;
Expand All @@ -9,9 +8,12 @@ use crate::{
interfaces::HostResult,
metrics::{dec_current_req, inc_current_req, inc_guest_req_count, inc_host_req_count},
proof::handle_proof,
server::api::v1::Status,
ProverState,
};

use super::ProofResponse;

#[utoipa::path(post, path = "/proof",
tag = "Proving",
request_body = ProofRequestOpt,
Expand All @@ -31,7 +33,7 @@ use crate::{
async fn proof_handler(
State(prover_state): State<ProverState>,
Json(req): Json<Value>,
) -> HostResult<Json<Proof>> {
) -> HostResult<Json<Status>> {
inc_current_req();
// Override the existing proof request config from the config file and command line
// options with the request from the client.
Expand All @@ -57,7 +59,16 @@ async fn proof_handler(
dec_current_req();
e
})
.map(Json)
.map(|proof| {
dec_current_req();
Json(Status::Ok {
data: ProofResponse {
output: None,
proof: proof.proof,
quote: proof.quote,
},
})
})
}

#[derive(OpenApi)]
Expand Down
2 changes: 1 addition & 1 deletion lib/src/prover.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ pub type ProofKey = (ChainId, B256, u8);
#[derive(Debug, Serialize, ToSchema, Deserialize, Default)]
/// The response body of a proof request.
pub struct Proof {
/// The ZK proof.
/// The proof, either a TEE or a ZK proof.
pub proof: Option<String>,
/// The TEE quote.
pub quote: Option<String>,
Expand Down
56 changes: 35 additions & 21 deletions provers/risc0/driver/src/bonsai.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
use log::{debug, error, info, warn};
use raiko_lib::{
primitives::keccak::keccak,
prover::{IdWrite, ProofKey},
prover::{IdWrite, ProofKey, ProverError, ProverResult},
};
use risc0_zkvm::{
compute_image_id, is_dev_mode, serde::to_vec, sha::Digest, Assumption, ExecutorEnv,
Expand Down Expand Up @@ -48,7 +48,7 @@ pub async fn verify_bonsai_receipt<O: Eq + Debug + DeserializeOwned>(
}
}

let res = res.unwrap();
let res = res.ok_or_else(|| ProverError::GuestError("No res!".to_owned()))?;

if res.status == "RUNNING" {
info!(
Expand All @@ -69,7 +69,10 @@ pub async fn verify_bonsai_receipt<O: Eq + Debug + DeserializeOwned>(
.verify(image_id)
.expect("Receipt verification failed");
// verify output
let receipt_output: O = receipt.journal.decode().unwrap();
let receipt_output: O = receipt
.journal
.decode()
.map_err(|e| ProverError::GuestError(e.to_string()))?;
if expected_output == &receipt_output {
info!("Receipt validated!");
} else {
Expand Down Expand Up @@ -139,17 +142,19 @@ pub async fn maybe_prove<I: Serialize, O: Eq + Debug + Serialize + DeserializeOw
} else {
// run prover
info!("start running local prover");
(
Default::default(),
prove_locally(
param.execution_po2,
encoded_input,
elf,
assumption_instances,
param.profile,
),
false,
)
match prove_locally(
param.execution_po2,
encoded_input,
elf,
assumption_instances,
param.profile,
) {
Ok(receipt) => (Default::default(), receipt, false),
Err(e) => {
warn!("Failed to prove locally: {e:?}");
return None;
}
}
};

info!("receipt: {receipt:?}");
Expand Down Expand Up @@ -236,7 +241,7 @@ pub fn prove_locally(
elf: &[u8],
assumptions: Vec<Assumption>,
profile: bool,
) -> Receipt {
) -> ProverResult<Receipt> {
debug!("Proving with segment_limit_po2 = {segment_limit_po2:?}");
debug!(
"Input size: {} words ( {} MB )",
Expand All @@ -263,14 +268,23 @@ pub fn prove_locally(

let segment_dir = PathBuf::from("/tmp/risc0-cache");
if !segment_dir.exists() {
fs::create_dir(segment_dir.clone()).unwrap();
fs::create_dir(segment_dir.clone()).map_err(|e| ProverError::FileIo(e))?;
}
let env = env_builder.segment_path(segment_dir).build().unwrap();
let mut exec = ExecutorImpl::from_elf(env, elf).unwrap();

exec.run().unwrap()
let env = env_builder
.segment_path(segment_dir)
.build()
.map_err(|e| ProverError::GuestError(e.to_string()))?;
let mut exec =
ExecutorImpl::from_elf(env, elf).map_err(|e| ProverError::GuestError(e.to_string()))?;

exec.run()
.map_err(|e| ProverError::GuestError(e.to_string()))?
};
session.prove().unwrap().receipt
let receipt = session
.prove()
.map_err(|e| ProverError::GuestError(e.to_string()))?
.receipt;
Ok(receipt)
}

pub fn load_receipt<T: serde::de::DeserializeOwned>(
Expand Down
17 changes: 17 additions & 0 deletions script/check-epc-size.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#!/usr/bin/env bash

# Report the machine's SGX EPC (Enclave Page Cache) size in GB.
#
# Scans the kernel log for the EPC section line, e.g.:
#   sgx: EPC section 0x180000000-0x1bfffffff
# Requires root (dmesg is privileged on most distros) and python3.

# Grab the EPC line from the kernel ring buffer.
# `grep -F` replaces the deprecated `fgrep` alias (same fixed-string match).
epc_line=$(sudo dmesg | grep -F EPC)

# Extract the start and end addresses of the EPC range.
if [[ $epc_line =~ 0x([0-9a-fA-F]+)-0x([0-9a-fA-F]+) ]]; then
    start_address=0x${BASH_REMATCH[1]}
    end_address=0x${BASH_REMATCH[2]}

    # Use Python for the arithmetic so the size is reported as a fractional
    # GB value — bash integer arithmetic would truncate sub-GB precision.
    epc_size_gb=$(python3 -c "print(($end_address - $start_address) / 1024 ** 3)")

    echo "EPC Size: $epc_size_gb GB"
else
    echo "EPC section not found in dmesg output."
fi

0 comments on commit a883c4b

Please sign in to comment.