feat: v4 (#300)
* feat: v4

* feat: ready

* feat: update elfs

* server

* save

* add

* update

* add
ratankaliani authored Jan 9, 2025
1 parent c4eef1a commit b5ec09a
Showing 19 changed files with 617 additions and 721 deletions.
979 changes: 475 additions & 504 deletions Cargo.lock

Large diffs are not rendered by default.

24 changes: 10 additions & 14 deletions Cargo.toml
@@ -88,14 +88,10 @@ op-alloy-rpc-types-engine = { version = "0.6.8", default-features = false }
op-alloy-network = { version = "0.6.8", default-features = false }

# sp1
sp1-lib = { version = "3.4.0", features = ["verify"] }
# Note: This rev includes the SP1 mock groth16 bytes fix, skipping deferred verification, executor opts and network-v2 changes.
# ratan/v3.4.0-with-additions includes the network-v2 changes from `19ae3e1`. Has the latest FD's for ECDSA patch.
sp1-sdk = { git = "https://github.com/succinctlabs/sp1.git", rev = "f0b61cf262388c1be7edfcf984c8e070064e980f", features = [
"network-v2",
] }
sp1-zkvm = { version = "3.4.0", features = ["verify"] }
sp1-build = { version = "3.4.0" }
sp1-lib = { version = "4.0.0-rc.8", features = ["verify"] }
sp1-sdk = { version = "4.0.0-rc.8" }
sp1-zkvm = { version = "4.0.0-rc.8", features = ["verify"] }
sp1-build = { version = "4.0.0-rc.8" }

[profile.release-client-lto]
inherits = "release"
@@ -104,11 +100,11 @@ codegen-units = 1
lto = "fat"

[patch.crates-io]
tiny-keccak = { git = "https://github.com/sp1-patches/tiny-keccak", tag = "tiny_keccak-v2.0.2-patch-v1" }
sha2 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", tag = "sha2-v0.10.8-patch-v1" }
ecdsa = { git = "https://github.com/sp1-patches/signatures", tag = "ecdsa-v0.16.9-patch-v3.3.0" }
substrate-bn = { git = "https://github.com/sp1-patches/bn", tag = "substrate_bn-v0.6.0-patch-v2" }
sha3 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha3", tag = "sha3-v0.10.8-patch-v1" }
tiny-keccak = { git = "https://github.com/sp1-patches/tiny-keccak", tag = "patch-2.0.2-sp1-4.0.0-rc.3" }
sha2 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha2", tag = "patch-sha2-0.10.8-sp1-4.0.0-rc.3" }
ecdsa = { git = "https://github.com/sp1-patches/signatures", tag = "patch-0.16.9-sp1-4.0.0-rc.3-v2" }
substrate-bn = { git = "https://github.com/sp1-patches/bn", tag = "patch-0.6.0-sp1-4.0.0-rc.3-v1" }
sha3 = { git = "https://github.com/sp1-patches/RustCrypto-hashes", package = "sha3", tag = "patch-sha3-0.10.8-sp1-4.0.0-rc.3" }

# Note: Renamed this package to sp1_bls12_381 because it was published with that crate name to kzg_rs.
sp1_bls12_381 = { git = "https://github.com/sp1-patches/bls12_381.git", branch = "ratan/rename-package" }
sp1_bls12_381 = { git = "https://github.com/sp1-patches/bls12_381.git", branch = "ratan/patch-0.8.0-sp1-4.0.0-rc.3-v1-rename-package" }
4 changes: 2 additions & 2 deletions book/advanced/verify-binaries.md
@@ -37,11 +37,11 @@ Then build the binaries:
```bash
cd programs/range
# Build the range-elf
cargo prove build --elf-name range-elf --docker --tag "v3.0.0"
cargo prove build --elf-name range-elf --docker --tag v4.0.0-rc.3

cd ../aggregation
# Build the aggregation-elf
cargo prove build --elf-name aggregation-elf --docker --tag "v3.0.0"
cargo prove build --elf-name aggregation-elf --docker --tag v4.0.0-rc.3
```

Now, verify the binaries by confirming the output of `vkey` matches the vkeys on the contract. The `vkey` program outputs the verification keys
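For reference, a minimal sketch of what a `vkey`-style utility can look like with the v4 SDK. This is not the repository's actual binary; the ELF paths and constant names below are assumptions.

```rust
use sp1_sdk::{HashableKey, Prover, ProverClient};

// Hypothetical ELF constants; the real project exposes these from its
// host-utils crate, and the paths below are illustrative only.
const RANGE_ELF: &[u8] = include_bytes!("../../elf/range-elf");
const AGG_ELF: &[u8] = include_bytes!("../../elf/aggregation-elf");

fn main() {
    // A mock prover is sufficient here: setup() only computes the keys.
    let prover = ProverClient::builder().mock().build();

    let (_, range_vk) = prover.setup(RANGE_ELF);
    let (_, agg_vk) = prover.setup(AGG_ELF);

    // bytes32() returns the hex-encoded verification key hash that should
    // match the value recorded on the contract.
    println!("range vkey: {}", range_vk.bytes32());
    println!("aggregation vkey: {}", agg_vk.bytes32());
}
```

Compare the printed values against the verification keys recorded on the contract.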
Binary file modified elf/aggregation-elf
Binary file not shown.
Binary file modified elf/range-elf
Binary file not shown.
190 changes: 69 additions & 121 deletions proposer/succinct/bin/server.rs
@@ -23,12 +23,11 @@ use op_succinct_proposer::{
ValidateConfigRequest, ValidateConfigResponse,
};
use sp1_sdk::{
network_v2::{
client::NetworkClient,
proto::network::{ExecutionStatus, FulfillmentStatus, FulfillmentStrategy, ProofMode},
network::{
proto::network::{ExecutionStatus, FulfillmentStatus},
FulfillmentStrategy,
},
utils, HashableKey, NetworkProverV2, ProverClient, SP1Proof, SP1ProofWithPublicValues,
SP1Stdin,
utils, HashableKey, Prover, ProverClient, SP1Proof, SP1ProofWithPublicValues,
};
use std::{env, str::FromStr, time::Duration};
use tower_http::limit::RequestBodyLimitLayer;
@@ -46,7 +45,7 @@ async fn main() -> Result<()> {

dotenv::dotenv().ok();

let prover = ProverClient::new();
let prover = ProverClient::builder().mock().build();
let (range_pk, range_vk) = prover.setup(RANGE_ELF);
let (agg_pk, agg_vk) = prover.setup(AGG_ELF);
let multi_block_vkey_u8 = u32_to_u8(range_vk.vk.hash_u32());
@@ -184,61 +183,26 @@ async fn request_span_proof(
}
};

let private_key = match env::var("SP1_PRIVATE_KEY") {
Ok(private_key) => private_key,
Err(e) => {
error!("Failed to get SP1 private key: {}", e);
return Err(AppError(anyhow::anyhow!(
"Failed to get SP1 private key: {}",
e
)));
}
};
let rpc_url = match env::var("PROVER_NETWORK_RPC") {
Ok(rpc_url) => rpc_url,
Err(e) => {
error!("Failed to get PROVER_NETWORK_RPC: {}", e);
return Err(AppError(anyhow::anyhow!(
"Failed to get PROVER_NETWORK_RPC: {}",
e
)));
}
};
let mut prover = NetworkProverV2::new(&private_key, Some(rpc_url.to_string()), false);
// Use the reserved strategy to route to a specific cluster.
prover.with_strategy(FulfillmentStrategy::Reserved);

// Set simulation to false on range proofs as they're large.
env::set_var("SKIP_SIMULATION", "true");
let vk_hash = match prover.register_program(&state.range_vk, RANGE_ELF).await {
Ok(vk_hash) => vk_hash,
Err(e) => {
error!("Failed to register program: {}", e);
return Err(AppError(anyhow::anyhow!(
"Failed to register program: {}",
e
)));
}
};
let proof_id = match prover
.request_proof(
&vk_hash,
&sp1_stdin,
ProofMode::Compressed,
1_000_000_000_000,
None,
)
let client = ProverClient::builder().network().build();
let proof_id = client
.prove(&state.range_pk, &sp1_stdin)
.compressed()
.strategy(FulfillmentStrategy::Reserved)
.skip_simulation(true)
.cycle_limit(1_000_000_000_000)
.request_async()
.await
{
Ok(proof_id) => proof_id,
Err(e) => {
.map_err(|e| {
error!("Failed to request proof: {}", e);
return Err(AppError(anyhow::anyhow!("Failed to request proof: {}", e)));
}
};
env::set_var("SKIP_SIMULATION", "false");
AppError(anyhow::anyhow!("Failed to request proof: {}", e))
})?;

Ok((StatusCode::OK, Json(ProofResponse { proof_id })))
Ok((
StatusCode::OK,
Json(ProofResponse {
proof_id: proof_id.to_vec(),
}),
))
}
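
Taken together, the new range-proof request path reduces to a single builder chain. The following is a condensed sketch, assuming the network prover's credentials (e.g. `NETWORK_PRIVATE_KEY`) are set in the environment and that `RANGE_ELF` is available:

```rust
use anyhow::Result;
use sp1_sdk::{network::FulfillmentStrategy, Prover, ProverClient, SP1Stdin};

// Hypothetical ELF constant; the real project exposes RANGE_ELF from its
// host-utils crate.
const RANGE_ELF: &[u8] = include_bytes!("../../elf/range-elf");

async fn request_range_proof(stdin: SP1Stdin) -> Result<Vec<u8>> {
    // The network prover reads its credentials from the environment.
    let client = ProverClient::builder().network().build();
    let (range_pk, _) = client.setup(RANGE_ELF);

    // Request a compressed proof on the reserved cluster, skipping local
    // simulation because range programs are large.
    let proof_id = client
        .prove(&range_pk, &stdin)
        .compressed()
        .strategy(FulfillmentStrategy::Reserved)
        .skip_simulation(true)
        .cycle_limit(1_000_000_000_000)
        .request_async()
        .await?;

    Ok(proof_id.to_vec())
}
```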

/// Request an aggregation proof for a set of subproofs.
@@ -315,11 +279,7 @@ async fn request_agg_proof(
}
};

let private_key = env::var("SP1_PRIVATE_KEY")?;
let rpc_url = env::var("PROVER_NETWORK_RPC")?;
let mut prover = NetworkProverV2::new(&private_key, Some(rpc_url.to_string()), false);
// Use the reserved strategy to route to a specific cluster.
prover.with_strategy(FulfillmentStrategy::Reserved);
let prover = ProverClient::builder().network().build();

let stdin =
match get_agg_proof_stdin(proofs, boot_infos, headers, &state.range_vk, l1_head.into()) {
@@ -333,24 +293,11 @@
}
};

let vk_hash = match prover.register_program(&state.agg_vk, AGG_ELF).await {
Ok(vk_hash) => vk_hash,
Err(e) => {
error!("Failed to register program: {}", e);
return Err(AppError(anyhow::anyhow!(
"Failed to register program: {}",
e
)));
}
};
let proof_id = match prover
.request_proof(
&vk_hash,
&stdin,
ProofMode::Groth16,
1_000_000_000_000,
None,
)
.prove(&state.agg_pk, &stdin)
.groth16()
.strategy(FulfillmentStrategy::Reserved)
.request_async()
.await
{
Ok(id) => id,
@@ -360,10 +307,15 @@
}
};

Ok((StatusCode::OK, Json(ProofResponse { proof_id })))
Ok((
StatusCode::OK,
Json(ProofResponse {
proof_id: proof_id.to_vec(),
}),
))
}

/// Request a proof for a span of blocks.
/// Request a mock proof for a span of blocks.
async fn request_mock_span_proof(
State(state): State<ContractConfig>,
Json(payload): Json<SpanProofRequest>,
@@ -418,10 +370,9 @@ async fn request_mock_span_proof(
}
};

let prover = ProverClient::mock();
let prover = ProverClient::builder().mock().build();
let proof = prover
.prove(&state.range_pk, sp1_stdin)
.set_skip_deferred_proof_verification(true)
.prove(&state.range_pk, &sp1_stdin)
.compressed()
.run()?;

@@ -492,7 +443,7 @@ async fn request_mock_agg_proof(
}
};

let prover = ProverClient::mock();
let prover = ProverClient::builder().mock().build();

let stdin =
match get_agg_proof_stdin(proofs, boot_infos, headers, &state.range_vk, l1_head.into()) {
@@ -503,11 +454,10 @@
}
};

// Simulate the mock proof. proof.bytes() returns an empty byte array for mock proofs.
let proof = match prover
.prove(&state.agg_pk, stdin)
.set_skip_deferred_proof_verification(true)
.prove(&state.agg_pk, &stdin)
.groth16()
.deferred_proof_verification(false)
.run()
{
Ok(p) => p,
@@ -532,48 +482,46 @@ async fn get_proof_status(
Path(proof_id): Path<String>,
) -> Result<(StatusCode, Json<ProofStatus>), AppError> {
info!("Received proof status request: {:?}", proof_id);
let private_key = env::var("SP1_PRIVATE_KEY")?;
let rpc_url = env::var("PROVER_NETWORK_RPC")?;

let client = NetworkClient::new(&private_key, Some(rpc_url.to_string()));
let client = ProverClient::builder().network().build();

let proof_id_bytes = hex::decode(proof_id)?;

// Time out this request if it takes too long.
let timeout = Duration::from_secs(10);
let (status, maybe_proof) =
match tokio::time::timeout(timeout, client.get_proof_request_status(&proof_id_bytes)).await
{
Ok(Ok(result)) => result,
Ok(Err(_)) => {
return Ok((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ProofStatus {
fulfillment_status: FulfillmentStatus::UnspecifiedFulfillmentStatus.into(),
execution_status: ExecutionStatus::UnspecifiedExecutionStatus.into(),
proof: vec![],
}),
));
}
Err(_) => {
return Ok((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ProofStatus {
fulfillment_status: FulfillmentStatus::UnspecifiedFulfillmentStatus.into(),
execution_status: ExecutionStatus::UnspecifiedExecutionStatus.into(),
proof: vec![],
}),
));
}
};
let (status, maybe_proof) = match tokio::time::timeout(
timeout,
client.get_proof_status(B256::from_slice(&proof_id_bytes)),
)
.await
{
Ok(Ok(result)) => result,
Ok(Err(_)) => {
return Ok((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ProofStatus {
fulfillment_status: FulfillmentStatus::UnspecifiedFulfillmentStatus.into(),
execution_status: ExecutionStatus::UnspecifiedExecutionStatus.into(),
proof: vec![],
}),
));
}
Err(_) => {
return Ok((
StatusCode::INTERNAL_SERVER_ERROR,
Json(ProofStatus {
fulfillment_status: FulfillmentStatus::UnspecifiedFulfillmentStatus.into(),
execution_status: ExecutionStatus::UnspecifiedExecutionStatus.into(),
proof: vec![],
}),
));
}
};

let fulfillment_status = status.fulfillment_status;
let execution_status = status.execution_status;
if fulfillment_status == FulfillmentStatus::Fulfilled as i32 {
let mut proof: SP1ProofWithPublicValues = maybe_proof.unwrap();
// Remove the stdin from the proof, as it's unnecessary for verification. Note: In v4, there is no stdin.
// Previously, this caused the memory usage of the proposer to be high.
proof.stdin = SP1Stdin::default();
let proof: SP1ProofWithPublicValues = maybe_proof.unwrap();

match proof.proof {
SP1Proof::Compressed(_) => {
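For clarity, the status-polling pattern above can be condensed as follows; a sketch assuming the request ID arrives as raw bytes and that `B256` comes from `alloy_primitives`:

```rust
use alloy_primitives::B256;
use anyhow::Result;
use sp1_sdk::{
    network::proto::network::FulfillmentStatus, ProverClient, SP1ProofWithPublicValues,
};
use std::time::Duration;

async fn poll_proof(proof_id_bytes: &[u8]) -> Result<Option<SP1ProofWithPublicValues>> {
    let client = ProverClient::builder().network().build();

    // Bound the RPC call so a slow prover network cannot hang the proposer.
    let (status, maybe_proof) = tokio::time::timeout(
        Duration::from_secs(10),
        client.get_proof_status(B256::from_slice(proof_id_bytes)),
    )
    .await??;

    // Only return the proof once the request has actually been fulfilled.
    if status.fulfillment_status == FulfillmentStatus::Fulfilled as i32 {
        Ok(maybe_proof)
    } else {
        Ok(None)
    }
}
```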
1 change: 1 addition & 0 deletions scripts/prove/Cargo.toml
@@ -32,6 +32,7 @@ kona-host = { workspace = true }
# local
op-succinct-host-utils.workspace = true
op-succinct-client-utils.workspace = true
op-succinct-scripts = { path = "../utils" }

# op-alloy
op-alloy-genesis.workspace = true
11 changes: 6 additions & 5 deletions scripts/prove/bin/agg.rs
@@ -33,7 +33,6 @@ struct Args {
fn load_aggregation_proof_data(
proof_names: Vec<String>,
range_vkey: &SP1VerifyingKey,
prover: &ProverClient,
) -> (Vec<SP1Proof>, Vec<BootInfoStruct>) {
let metadata = MetadataCommand::new().exec().unwrap();
let workspace_root = metadata.workspace_root;
@@ -42,6 +41,8 @@ fn load_aggregation_proof_data(
let mut proofs = Vec::with_capacity(proof_names.len());
let mut boot_infos = Vec::with_capacity(proof_names.len());

let prover = ProverClient::from_env();

for proof_name in proof_names.iter() {
let proof_path = format!("{}/{}.bin", proof_directory, proof_name);
if fs::metadata(&proof_path).is_err() {
@@ -71,12 +72,12 @@ async fn main() -> Result<()> {

dotenv::from_filename(args.env_file).ok();

let prover = ProverClient::new();
let prover = ProverClient::from_env();
let fetcher = OPSuccinctDataFetcher::new_with_rollup_config(RunContext::Dev).await?;

let (_, vkey) = prover.setup(RANGE_ELF);

let (proofs, boot_infos) = load_aggregation_proof_data(args.proofs, &vkey, &prover);
let (proofs, boot_infos) = load_aggregation_proof_data(args.proofs, &vkey);

let header = fetcher.get_latest_l1_head_in_batch(&boot_infos).await?;
let headers = fetcher
@@ -96,12 +97,12 @@

if args.prove {
prover
.prove(&agg_pk, stdin)
.prove(&agg_pk, &stdin)
.groth16()
.run()
.expect("proving failed");
} else {
let (_, report) = prover.execute(AGG_ELF, stdin).run().unwrap();
let (_, report) = prover.execute(AGG_ELF, &stdin).run().unwrap();
println!("report: {:?}", report);
}

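The body of `load_aggregation_proof_data` is collapsed above. As a rough sketch, its per-proof step looks like the following; the path layout comes from the surrounding code and the helper name is hypothetical:

```rust
use sp1_sdk::{SP1Proof, SP1ProofWithPublicValues};

// Hypothetical helper mirroring the per-file step inside
// load_aggregation_proof_data: load a saved proof from disk and split it
// into the compressed proof and its public values.
fn load_one(proof_directory: &str, proof_name: &str) -> (SP1Proof, Vec<u8>) {
    let proof_path = format!("{}/{}.bin", proof_directory, proof_name);
    let deserialized = SP1ProofWithPublicValues::load(&proof_path)
        .expect("failed to load proof from disk");

    // In the real function, the BootInfoStruct needed for aggregation is
    // decoded from these public values.
    (deserialized.proof, deserialized.public_values.to_vec())
}
```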