Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "scroll-proving-sdk"
version = "0.2.0"
version = "0.3.0"
edition = "2024"

[[bin]]
Expand Down
5 changes: 3 additions & 2 deletions src/coordinator_handler/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ use eyre::Context;
use http::{Method, StatusCode};
use reqwest::{Url, header::CONTENT_TYPE};
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_retry::{RetryTransientMiddleware, policies::ExponentialBackoff};
use reqwest_retry::{Jitter, RetryTransientMiddleware, policies::ExponentialBackoff};
use serde::{Deserialize, Serialize};
use tracing::Level;

Expand All @@ -22,7 +22,8 @@ impl Api {
pub fn new(cfg: CoordinatorConfig) -> eyre::Result<Self> {
let retry_wait_duration = Duration::from_secs(cfg.retry_wait_time_sec);
let retry_policy = ExponentialBackoff::builder()
.retry_bounds(retry_wait_duration / 2, retry_wait_duration)
.retry_bounds(retry_wait_duration, retry_wait_duration * 2)
.jitter(Jitter::None)
.build_with_max_retries(cfg.retry_count);

let client = ClientBuilder::new(reqwest::Client::new())
Expand Down
6 changes: 3 additions & 3 deletions src/coordinator_handler/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -288,9 +288,9 @@ mod tests {
let prover_types = vec![ProverType::Chunk];
let vks = vec!["mock_vk".to_string()];
let login_message = LoginMessage {
challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik".into(),
prover_version: "v4.4.45-37af5ef5-38a68e2-1c5093c".into(),
prover_name: "test".into(),
challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
prover_version: "v4.4.45-37af5ef5-38a68e2-1c5093c",
prover_name: "test",
prover_provider_type: ProverProviderType::Internal,
prover_types: ProverTypes(&prover_types),
vks: &vks,
Expand Down
4 changes: 4 additions & 0 deletions src/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,10 @@ pub struct Db {

impl Db {
pub fn new(path: impl AsRef<Path>) -> eyre::Result<Self> {
tracing::info!(
"Apply local storage at {}",
path.as_ref().to_str().unwrap_or("WRONG PATH")
);
let db = DB::open_default(path)?;
Ok(Self { db })
}
Expand Down
12 changes: 11 additions & 1 deletion src/prover/builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,18 @@ where
format_cloud_prover_name(self.cfg.prover_name_prefix.clone(), i)
};

let mut client_cfg = self.cfg.coordinator.clone();
if client_cfg.retry_wait_time_sec < self.cfg.prover.poll_interval_sec {
tracing::warn!(
"Enforce too short retry wait time ({}) to equal the poll interval ({})",
client_cfg.retry_wait_time_sec,
self.cfg.prover.poll_interval_sec,
);
client_cfg.retry_wait_time_sec = self.cfg.prover.poll_interval_sec;
}

CoordinatorClient::new(
self.cfg.coordinator.clone(),
client_cfg,
self.cfg.coordinator_prover_type(),
self.cfg.coordinator.suppress_empty_task_error,
get_vk_response.vks.clone(),
Expand Down
44 changes: 22 additions & 22 deletions src/prover/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,9 +94,6 @@ where
let task_str = task.to_string();
let i = work_set.pop().expect("can not be empty");
provers.spawn(async move {
// Soft start delay to stagger the provers
sleep(self_clone.poll_delay()).await;

let coordinator_client = &self_clone.coordinator_clients[i];
let prover_name = &coordinator_client.prover_name;

Expand All @@ -111,6 +108,9 @@ where
}
i
});

// Soft start delay to stagger the provers
sleep(self.poll_delay()).await;
}

// wait until all tasks has been done
Expand Down Expand Up @@ -150,22 +150,20 @@ where
coordinator_client: &CoordinatorClient,
task_spec: Option<(ProofType, &str)>,
) -> eyre::Result<()> {
if let Some((coordinator_task, mut proving_task_id)) = self
.db
.as_ref()
.map(|db| db.get_task(&coordinator_client.key_signer.get_public_key()))
.unwrap_or_default()
let public_key = &coordinator_client.key_signer.get_public_key();
if let Some((coordinator_task, mut proving_task_id)) =
self.db.as_ref().and_then(|db| db.get_task(public_key))
{
let task_id = coordinator_task.clone().task_id;
debug!(task_id = %task_id, "got previous task from db");
info!(task_id = %task_id, "got previous task from db");
if self.proving_service.read().await.is_local() {
let proving_task = self
.request_proving(coordinator_client, &coordinator_task)
.await?;
proving_task_id = proving_task.task_id
}
return self
.handle_proving_progress(coordinator_client, &coordinator_task, proving_task_id)
.handle_proving_progress(coordinator_client, &coordinator_task, &proving_task_id)
.await;
}

Expand All @@ -186,7 +184,7 @@ where
let proving_task = self
.request_proving(coordinator_client, &coordinator_task)
.await?;
self.handle_proving_progress(coordinator_client, &coordinator_task, proving_task.task_id)
self.handle_proving_progress(coordinator_client, &coordinator_task, &proving_task.task_id)
.await
}

Expand Down Expand Up @@ -247,7 +245,7 @@ where
&self,
coordinator_client: &CoordinatorClient,
coordinator_task: &GetTaskResponse,
proving_service_task_id: String,
proving_service_task_id: &str,
) -> eyre::Result<()> {
let prover_name = &coordinator_client.prover_name;
let public_key = &coordinator_client.key_signer.get_public_key();
Expand All @@ -258,13 +256,18 @@ where
// Track last observed status to avoid spamming logs when status hasn't changed.
let mut last_status: Option<TaskStatus> = None;

if let Some(db) = &self.db {
info!(task_id = %proving_service_task_id, "store task to local db");
db.set_task(public_key, coordinator_task, proving_service_task_id);
}

loop {
let task = self
.proving_service
.write()
.await
.query_task(QueryTaskRequest {
task_id: proving_service_task_id.clone(),
task_id: proving_service_task_id.to_string(),
})
.await;

Expand All @@ -284,9 +287,6 @@ where
);
}
last_status.replace(current_status);
if let Some(db) = &self.db {
db.set_task(public_key, coordinator_task, &proving_service_task_id);
}
sleep(self.poll_delay()).await;
}
TaskStatus::Success => {
Expand All @@ -298,6 +298,9 @@ where
?proving_service_task_id,
"Task proved successfully"
);
if let Some(db) = &self.db {
db.delete_task(public_key);
}
self.submit_proof(
coordinator_client,
coordinator_task,
Expand All @@ -306,9 +309,6 @@ where
None,
)
.await?;
if let Some(db) = &self.db {
db.delete_task(public_key);
}
break;
}
TaskStatus::Failed => {
Expand All @@ -322,6 +322,9 @@ where
?task_err,
"Task failed"
);
if let Some(db) = &self.db {
db.delete_task(public_key);
}
self.submit_proof(
coordinator_client,
coordinator_task,
Expand All @@ -330,9 +333,6 @@ where
Some(task_err),
)
.await?;
if let Some(db) = &self.db {
db.delete_task(public_key);
}
break;
}
}
Expand Down
Loading