Skip to content

Commit 37bcc7b

Browse files
committed
add lock command
1 parent 0117234 commit 37bcc7b

File tree

5 files changed

+278
-0
lines changed

5 files changed

+278
-0
lines changed

crates/core/src/commands.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ pub mod dump;
1212
pub mod forget;
1313
pub mod init;
1414
pub mod key;
15+
pub mod lock;
1516
pub mod merge;
1617
pub mod prune;
1718
/// The `repair` command.

crates/core/src/commands/lock.rs

Lines changed: 256 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,256 @@
1+
//! `lock` subcommand
2+
use std::collections::BTreeSet;
3+
4+
use chrono::{DateTime, Local};
5+
use derive_setters::Setters;
6+
use log::error;
7+
use rayon::ThreadPoolBuilder;
8+
9+
use crate::{
10+
backend::{
11+
decrypt::{DecryptReadBackend, DecryptWriteBackend},
12+
node::NodeType,
13+
FileType,
14+
},
15+
blob::{tree::TreeStreamerOnce, BlobType},
16+
error::{CommandErrorKind, RepositoryErrorKind, RusticResult},
17+
id::Id,
18+
index::{
19+
binarysorted::{IndexCollector, IndexType},
20+
indexer::Indexer,
21+
GlobalIndex, ReadGlobalIndex,
22+
},
23+
progress::{Progress, ProgressBars},
24+
repofile::{IndexFile, SnapshotFile},
25+
repository::{Open, Repository},
26+
};
27+
28+
/// Constants used by the `lock` command.
pub(super) mod constants {
    /// The maximum number of reader threads to use for locking.
    /// Caps the size of the rayon thread pool built in `lock_files`.
    pub(super) const MAX_LOCKER_THREADS_NUM: usize = 20;
}
32+
33+
#[derive(Debug, Clone, Default, Copy, Setters)]
/// Options for the `lock` command
pub struct LockOptions {
    /// Extend locks even if the files are already locked long enough
    /// (i.e. re-lock files whose current lock already covers `until`)
    always_extend_lock: bool,

    /// Specify until when to extend the lock. If None, lock forever
    until: Option<DateTime<Local>>,
}
42+
43+
impl LockOptions {
    /// Lock the given snapshots and corresponding pack files
    ///
    /// Reads the full index, determines which packs are referenced by the
    /// snapshots' trees, locks those packs (rewriting the index files whose
    /// lock state changes) and finally locks the snapshot files themselves.
    ///
    /// # Arguments
    ///
    /// * `repo` - The repository to operate on
    /// * `snapshots` - The snapshots to lock
    /// * `now` - The current time; used to decide which replaced snapshot
    ///   files may be deleted
    pub fn lock<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        snapshots: &[SnapshotFile],
        now: DateTime<Local>,
    ) -> RusticResult<()> {
        let pb = &repo.pb;
        let be = repo.dbe();

        // Collect every index file (id + contents) while also building one
        // global index from all pack entries; the raw (id, IndexFile) pairs
        // are needed later so lock_packs can rewrite modified index files.
        let mut index_files = Vec::new();

        let p = pb.progress_counter("reading index...");
        let mut index_collector = IndexCollector::new(IndexType::Full);
        for index in be.stream_all::<IndexFile>(&p)? {
            let (id, index) = index?;
            index_collector.extend(index.packs.clone());
            index_files.push((id, index));
        }
        let index = GlobalIndex::new_from_index(index_collector.into_index());
        p.finish();

        // Resolve the packs referenced (directly or transitively) by the
        // snapshots' trees, then lock packs first and snapshots afterwards.
        let snap_tress = snapshots.iter().map(|sn| sn.tree).collect();
        let packs = find_needed_packs(be, &index, snap_tress, pb)?;
        self.lock_packs(repo, index_files, packs)?;

        self.lock_snapshots(repo, snapshots, now)?;

        Ok(())
    }

    /// Lock the given snapshot files.
    ///
    /// Snapshots whose lock does not yet cover `self.until` are re-saved
    /// with the new lock state and the superseded snapshot file is removed
    /// (unless it must be kept at `now`). Snapshots that are already locked
    /// long enough are only re-locked when `always_extend_lock` is set.
    fn lock_snapshots<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        snapshots: &[SnapshotFile],
        now: DateTime<Local>,
    ) -> RusticResult<()> {
        let mut new_snaps = Vec::new();
        let mut remove_snaps = Vec::new();
        let mut lock_snaps = Vec::new();

        for snap in snapshots {
            if !snap.delete.is_locked(self.until) {
                // Lock state must be extended: save a modified copy ...
                new_snaps.push(SnapshotFile {
                    delete: self.until.into(),
                    ..snap.clone()
                });
                // ... and schedule the old file for removal if allowed.
                if !snap.must_keep(now) {
                    remove_snaps.push(snap.id);
                }
            } else if self.always_extend_lock {
                lock_snaps.push(snap.id);
            }
        }

        // save new snapshots
        let new_ids = repo.save_snapshots(new_snaps)?;
        lock_snaps.extend(new_ids);

        // remove old snapshots
        repo.delete_snapshots(&remove_snaps)?;

        // Do the actual locking
        lock_files(repo, FileType::Snapshot, &lock_snaps, self.until)?;

        Ok(())
    }

    /// Lock the given pack files.
    ///
    /// Index files containing packs whose lock state changes are rewritten
    /// through a fresh `Indexer`; the outdated index files are deleted only
    /// after the new index has been finalized, then the pack files are locked.
    fn lock_packs<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        index_files: Vec<(Id, IndexFile)>,
        packs: BTreeSet<Id>,
    ) -> RusticResult<()> {
        let mut lock_packs = Vec::new();
        let mut remove_index = Vec::new();

        // Check for indexfiles-to-modify and for packs to lock
        // Also already write the new index from the index files which are modified.
        let p = repo.pb.progress_counter("processing index files...");
        p.set_length(index_files.len().try_into().unwrap());
        let indexer = Indexer::new_unindexed(repo.dbe().clone()).into_shared();
        for (id, mut index) in index_files {
            let mut modified = false;
            for pack in &mut index.packs {
                // Only packs needed by the snapshots are considered.
                if !packs.contains(&pack.id) {
                    continue;
                }
                if !pack.lock.is_locked(self.until) {
                    // Extend the lock state within the index entry and
                    // remember the pack for the actual file locking below.
                    pack.lock = self.until.into();
                    modified = true;
                    lock_packs.push(pack.id);
                } else if self.always_extend_lock {
                    // Index entry already covers `until`; only the file
                    // lock is refreshed, no index rewrite needed.
                    lock_packs.push(pack.id);
                }
            }
            if modified {
                // Re-add the complete (now updated) content of this index
                // file to the new index and mark the old file for removal.
                for pack in index.packs {
                    indexer.write().unwrap().add(pack)?;
                }
                for pack_remove in index.packs_to_delete {
                    indexer.write().unwrap().add_remove(pack_remove)?;
                }
                remove_index.push(id);
            }
            p.inc(1);
        }
        indexer.write().unwrap().finalize()?;
        p.finish();

        // Remove old index files
        let p = repo.pb.progress_counter("removing old index files...");
        repo.dbe()
            .delete_list(FileType::Index, true, remove_index.iter(), p)?;

        // Do the actual locking
        lock_files(repo, FileType::Pack, &lock_packs, self.until)?;

        Ok(())
    }
}
165+
166+
fn lock_files<P: ProgressBars, S>(
167+
repo: &Repository<P, S>,
168+
file_type: FileType,
169+
ids: &[Id],
170+
until: Option<DateTime<Local>>,
171+
) -> RusticResult<()> {
172+
let pool = ThreadPoolBuilder::new()
173+
.num_threads(constants::MAX_LOCKER_THREADS_NUM)
174+
.build()
175+
.map_err(RepositoryErrorKind::FromThreadPoolbilderError)?;
176+
let p = &repo
177+
.pb
178+
.progress_counter(format!("locking {file_type:?} files.."));
179+
p.set_length(ids.len().try_into().unwrap());
180+
let backend = &repo.be;
181+
pool.in_place_scope(|scope| {
182+
for id in ids {
183+
scope.spawn(move |_| {
184+
if let Err(e) = backend.lock(file_type, id, until) {
185+
// FIXME: Use error handling
186+
error!("lock failed for {file_type:?} {id:?}. {e}");
187+
};
188+
p.inc(1);
189+
});
190+
}
191+
});
192+
p.finish();
193+
Ok(())
194+
}
195+
196+
/// Find packs which are needed for the given Trees
197+
///
198+
/// # Arguments
199+
///
200+
/// * `index` - The index to use
201+
/// * `trees` - The trees to consider
202+
/// * `pb` - The progress bars
203+
///
204+
/// # Errors
205+
///
206+
// TODO!: add errors!
207+
fn find_needed_packs(
208+
be: &impl DecryptReadBackend,
209+
index: &impl ReadGlobalIndex,
210+
trees: Vec<Id>,
211+
pb: &impl ProgressBars,
212+
) -> RusticResult<BTreeSet<Id>> {
213+
let p = pb.progress_counter("finding needed packs...");
214+
215+
let mut packs = BTreeSet::new();
216+
217+
for tree_id in &trees {
218+
_ = packs.insert(
219+
index
220+
.get_id(BlobType::Tree, tree_id)
221+
.ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*tree_id))?
222+
.pack,
223+
);
224+
}
225+
226+
let mut tree_streamer = TreeStreamerOnce::new(be, index, trees, p)?;
227+
while let Some(item) = tree_streamer.next().transpose()? {
228+
let (_, tree) = item;
229+
for node in tree.nodes {
230+
match node.node_type {
231+
NodeType::File => {
232+
for id in node.content.iter().flatten() {
233+
_ = packs.insert(
234+
index
235+
.get_id(BlobType::Data, id)
236+
.ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*id))?
237+
.pack,
238+
);
239+
}
240+
}
241+
NodeType::Dir => {
242+
let id = &node.subtree.unwrap();
243+
_ = packs.insert(
244+
index
245+
.get_id(BlobType::Tree, id)
246+
.ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*id))?
247+
.pack,
248+
);
249+
}
250+
_ => {} // nothing to do
251+
}
252+
}
253+
}
254+
255+
Ok(packs)
256+
}

crates/core/src/error.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -214,6 +214,8 @@ pub enum CommandErrorKind {
214214
FromRayonError(#[from] rayon::ThreadPoolBuildError),
215215
/// conversion to `u64` failed: `{0:?}`
216216
ConversionToU64Failed(TryFromIntError),
217+
/// Id {0:?} not found in index
218+
IdNotFoundinIndex(Id),
217219
}
218220

219221
/// [`CryptoErrorKind`] describes the errors that can happen while dealing with Cryptographic functions

crates/core/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -196,6 +196,7 @@ pub use crate::{
196196
copy::CopySnapshot,
197197
forget::{ForgetGroup, ForgetGroups, ForgetSnapshot, KeepOptions},
198198
key::KeyOptions,
199+
lock::LockOptions,
199200
prune::{PruneOptions, PrunePlan, PruneStats},
200201
repair::{index::RepairIndexOptions, snapshots::RepairSnapshotsOptions},
201202
repoinfo::{BlobInfo, IndexInfos, PackInfo, RepoFileInfo, RepoFileInfos},

crates/core/src/repository.rs

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ use std::{
1010
};
1111

1212
use bytes::Bytes;
13+
use chrono::Local;
1314
use derive_setters::Setters;
1415
use log::{debug, error, info};
1516
use serde_with::{serde_as, DisplayFromStr};
@@ -37,6 +38,7 @@ use crate::{
3738
copy::CopySnapshot,
3839
forget::{ForgetGroups, KeepOptions},
3940
key::KeyOptions,
41+
lock::LockOptions,
4042
prune::{PruneOptions, PrunePlan},
4143
repair::{index::RepairIndexOptions, snapshots::RepairSnapshotsOptions},
4244
repoinfo::{IndexInfos, RepoFileInfos},
@@ -1058,6 +1060,22 @@ impl<P: ProgressBars, S: Open> Repository<P, S> {
10581060
opts.get_plan(self)
10591061
}
10601062

1063+
/// Lock snapshot and pack files needed for the given snapshots
1064+
///
1065+
/// # Arguments
1066+
///
1067+
/// * `opts` - The lock options to use
1068+
/// * `snaps` - The snapshots to lock
1069+
/// * `until` - until when to lock. None means lock forever.
1070+
///
1071+
/// # Errors
1072+
///
1073+
// TODO: Document errors
1074+
pub fn lock(&self, opts: &LockOptions, snaps: &[SnapshotFile]) -> RusticResult<()> {
1075+
let now = Local::now();
1076+
opts.lock(self, snaps, now)
1077+
}
1078+
10611079
/// Turn the repository into the `IndexedFull` state by reading and storing the index
10621080
///
10631081
/// # Errors

0 commit comments

Comments
 (0)