//! `lock` subcommand
use std::collections::BTreeSet;

use chrono::{DateTime, Local};
use derive_setters::Setters;
use log::error;
use rayon::ThreadPoolBuilder;

use crate::{
    backend::{
        decrypt::{DecryptReadBackend, DecryptWriteBackend},
        node::NodeType,
        FileType,
    },
    blob::{tree::TreeStreamerOnce, BlobType},
    error::{CommandErrorKind, RepositoryErrorKind, RusticResult},
    id::Id,
    index::{
        binarysorted::{IndexCollector, IndexType},
        indexer::Indexer,
        GlobalIndex, ReadGlobalIndex,
    },
    progress::{Progress, ProgressBars},
    repofile::{IndexFile, SnapshotFile},
    repository::{Open, Repository},
};

pub(super) mod constants {
    /// The maximum number of reader threads to use for locking.
    pub(super) const MAX_LOCKER_THREADS_NUM: usize = 20;
}

#[derive(Debug, Clone, Default, Copy, Setters)]
/// Options for the `lock` command
pub struct LockOptions {
    /// Extend locks even if the files are already locked long enough
    always_extend_lock: bool,

    /// Specify until when to extend the lock. If `None`, lock forever
    until: Option<DateTime<Local>>,
}
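
// A hedged usage sketch (not part of the original source): `derive_setters`
// generates chainable setters for the fields above, so a caller could
// configure and run the lock roughly like this, assuming `repo` and
// `snapshots` are already at hand:
//
//     let until = Some(Local::now() + chrono::Duration::days(30));
//     LockOptions::default()
//         .always_extend_lock(false)
//         .until(until)
//         .lock(&repo, &snapshots, Local::now())?;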

impl LockOptions {
    /// Lock the given snapshots and corresponding pack files
    pub fn lock<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        snapshots: &[SnapshotFile],
        now: DateTime<Local>,
    ) -> RusticResult<()> {
        let pb = &repo.pb;
        let be = repo.dbe();

        let mut index_files = Vec::new();

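        // Read all index files: the collected pack information is needed to
        // resolve blob ids, and the raw files are kept so that modified ones
        // can be rewritten when packs get locked.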
        let p = pb.progress_counter("reading index...");
        let mut index_collector = IndexCollector::new(IndexType::Full);
        for index in be.stream_all::<IndexFile>(&p)? {
            let (id, index) = index?;
            index_collector.extend(index.packs.clone());
            index_files.push((id, index));
        }
        let index = GlobalIndex::new_from_index(index_collector.into_index());
        p.finish();

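        // Lock the pack files referenced by the snapshots before locking the
        // snapshot files themselves.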
        let snap_trees = snapshots.iter().map(|sn| sn.tree).collect();
        let packs = find_needed_packs(be, &index, snap_trees, pb)?;
        self.lock_packs(repo, index_files, packs)?;

        self.lock_snapshots(repo, snapshots, now)?;

        Ok(())
    }

    fn lock_snapshots<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        snapshots: &[SnapshotFile],
        now: DateTime<Local>,
    ) -> RusticResult<()> {
        let mut new_snaps = Vec::new();
        let mut remove_snaps = Vec::new();
        let mut lock_snaps = Vec::new();

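        // Sort the snapshots into three buckets: snapshots whose lock must be
        // extended (re-written with a new `delete` option), the old versions
        // of those snapshots which can be removed, and the ids of the files
        // which actually have to be locked in the backend.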
        for snap in snapshots {
            if !snap.delete.is_locked(self.until) {
                new_snaps.push(SnapshotFile {
                    delete: self.until.into(),
                    ..snap.clone()
                });
                if !snap.must_keep(now) {
                    remove_snaps.push(snap.id);
                }
            } else if self.always_extend_lock {
                lock_snaps.push(snap.id);
            }
        }

        // save new snapshots
        let new_ids = repo.save_snapshots(new_snaps)?;
        lock_snaps.extend(new_ids);

        // remove old snapshots
        repo.delete_snapshots(&remove_snaps)?;

        // Do the actual locking
        lock_files(repo, FileType::Snapshot, &lock_snaps, self.until)?;

        Ok(())
    }

    fn lock_packs<P: ProgressBars, S: Open>(
        &self,
        repo: &Repository<P, S>,
        index_files: Vec<(Id, IndexFile)>,
        packs: BTreeSet<Id>,
    ) -> RusticResult<()> {
        let mut lock_packs = Vec::new();
        let mut remove_index = Vec::new();

        // Check which index files have to be modified and which packs have to
        // be locked. Also directly write the new index for the index files
        // which are modified.
        let p = repo.pb.progress_counter("processing index files...");
        p.set_length(index_files.len().try_into().unwrap());
        let indexer = Indexer::new_unindexed(repo.dbe().clone()).into_shared();
        for (id, mut index) in index_files {
            let mut modified = false;
            for pack in &mut index.packs {
                if !packs.contains(&pack.id) {
                    continue;
                }
                if !pack.lock.is_locked(self.until) {
                    pack.lock = self.until.into();
                    modified = true;
                    lock_packs.push(pack.id);
                } else if self.always_extend_lock {
                    lock_packs.push(pack.id);
                }
            }
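            // This index file was modified: re-write its full content via the
            // indexer and remember the old file for deletion.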
            if modified {
                for pack in index.packs {
                    indexer.write().unwrap().add(pack)?;
                }
                for pack_remove in index.packs_to_delete {
                    indexer.write().unwrap().add_remove(pack_remove)?;
                }
                remove_index.push(id);
            }
            p.inc(1);
        }
        indexer.write().unwrap().finalize()?;
        p.finish();

        // Remove old index files
        let p = repo.pb.progress_counter("removing old index files...");
        repo.dbe()
            .delete_list(FileType::Index, true, remove_index.iter(), p)?;

        // Do the actual locking
        lock_files(repo, FileType::Pack, &lock_packs, self.until)?;

        Ok(())
    }
}

fn lock_files<P: ProgressBars, S>(
    repo: &Repository<P, S>,
    file_type: FileType,
    ids: &[Id],
    until: Option<DateTime<Local>>,
) -> RusticResult<()> {
    let pool = ThreadPoolBuilder::new()
        .num_threads(constants::MAX_LOCKER_THREADS_NUM)
        .build()
        .map_err(RepositoryErrorKind::FromThreadPoolbilderError)?;
    let p = &repo
        .pb
        .progress_counter(format!("locking {file_type:?} files..."));
    p.set_length(ids.len().try_into().unwrap());
    let backend = &repo.be;
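    // Lock each file on a bounded thread pool. `in_place_scope` blocks the
    // current thread until every spawned task has finished, so all ids are
    // processed before the progress bar is finished below.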
    pool.in_place_scope(|scope| {
        for id in ids {
            scope.spawn(move |_| {
                if let Err(e) = backend.lock(file_type, id, until) {
                    // FIXME: Use error handling
                    error!("lock failed for {file_type:?} {id:?}. {e}");
                };
                p.inc(1);
            });
        }
    });
    p.finish();
    Ok(())
}

/// Find packs which are needed for the given trees
///
/// # Arguments
///
/// * `be` - The backend to read from
/// * `index` - The index to use
/// * `trees` - The trees to consider
/// * `pb` - The progress bars
///
/// # Errors
///
// TODO!: add errors!
fn find_needed_packs(
    be: &impl DecryptReadBackend,
    index: &impl ReadGlobalIndex,
    trees: Vec<Id>,
    pb: &impl ProgressBars,
) -> RusticResult<BTreeSet<Id>> {
    let p = pb.progress_counter("finding needed packs...");

    let mut packs = BTreeSet::new();

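    // First record the packs containing the root trees themselves.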
    for tree_id in &trees {
        _ = packs.insert(
            index
                .get_id(BlobType::Tree, tree_id)
                .ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*tree_id))?
                .pack,
        );
    }

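    // Then walk all trees once and record the packs holding each referenced
    // data blob and subtree.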
    let mut tree_streamer = TreeStreamerOnce::new(be, index, trees, p)?;
    while let Some(item) = tree_streamer.next().transpose()? {
        let (_, tree) = item;
        for node in tree.nodes {
            match node.node_type {
                NodeType::File => {
                    for id in node.content.iter().flatten() {
                        _ = packs.insert(
                            index
                                .get_id(BlobType::Data, id)
                                .ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*id))?
                                .pack,
                        );
                    }
                }
                NodeType::Dir => {
                    let id = &node.subtree.unwrap();
                    _ = packs.insert(
                        index
                            .get_id(BlobType::Tree, id)
                            .ok_or_else(|| CommandErrorKind::IdNotFoundinIndex(*id))?
                            .pack,
                    );
                }
                _ => {} // nothing to do
            }
        }
    }

    Ok(packs)
}