diff --git a/Cargo.toml b/Cargo.toml index 746a22a..2137583 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,11 +25,11 @@ sha2 = "0.9" tempfile = "3.1" thiserror = "1.0" uuid = { version = "0.8", features = ["v4"] } +vfs = "0.4" walkdir = "2.3" [dev-dependencies] proptest = "0.10" -vfs = "0.4" [dev-dependencies.cargo-husky] version = "1" diff --git a/src/backup.rs b/src/backup.rs index 21f694b..13754e2 100644 --- a/src/backup.rs +++ b/src/backup.rs @@ -1,32 +1,35 @@ +use crate::repository::Repository; use anyhow::Result; use anyhow::*; -use std::path::Path; - -use walkdir::WalkDir; - -use crate::repository::Repository; +use vfs::VfsPath; pub struct Engine<'a> { - source_path: &'a Path, + source_path: &'a VfsPath, repository: &'a mut Repository, } impl<'a> Engine<'a> { - pub fn new(source_path: &'a Path, repository: &'a mut Repository) -> Result { - if source_path.ancestors().any(|a| a == repository.path()) { + pub fn new(source_path: &'a VfsPath, repository: &'a mut Repository) -> Result { + let mut ancestors = vec![]; + let mut current = Some(source_path.clone()); + while let Some(path) = current { + ancestors.push(path.clone()); + current = path.parent(); + } + if ancestors.into_iter().any(|a| &a == repository.path()) { return Err(anyhow!("source same as repository")); } Ok(Engine { source_path, repository }) } pub fn backup(&mut self) -> Result<()> { - let walker = WalkDir::new(self.source_path); + let walker = self.source_path.walk_dir()?; let save_every = 16; let mut save_counter = 0; for maybe_entry in walker { let entry = maybe_entry?; - if entry.path() != self.source_path { - self.repository.store(entry.path())?; + if &entry != self.source_path { + self.repository.store(&entry)?; } save_counter += 1; if save_counter == save_every { diff --git a/src/index/io.rs b/src/index/io.rs index 8c6ab07..ec10197 100644 --- a/src/index/io.rs +++ b/src/index/io.rs @@ -1,7 +1,5 @@ -use atomicwrites::{AllowOverwrite, AtomicFile}; use std::collections::HashMap; -use std::fs; 
-use std::path::{Path, PathBuf}; +use vfs::VfsPath; use uuid::Uuid; @@ -15,69 +13,80 @@ use nix::unistd::getpid; use std::{cmp::max, io::Write}; impl Index { - pub fn load>(repository_path: T) -> Result { - let repository_path = repository_path.as_ref(); + pub fn load(repository_path: &VfsPath) -> Result { if !repository_path.exists() { - let mut index = Index::new(repository_path); - index.save()?; + let mut index = Index::new()?; + index.save(repository_path)?; } let lock = Lock::lock(repository_path)?; - let index = Index::load_from_file(&Index::index_file_path_for_repository_path(repository_path))?; + let index_file_path = &Index::index_file_path_for_repository_path(repository_path)?; + let index = Index::load_from_file(index_file_path)?; lock.release()?; log::debug!( - "[{}] loaded index from {}, version: {}", + "[{}] loaded index from {}, version: {}; {} items", getpid(), - repository_path.to_string_lossy(), - index.version + index_file_path.as_str(), + index.version, + index.newest_items_by_source_path.len() ); Ok(index) } - pub fn save(&mut self) -> Result<()> { + pub fn save(&mut self, repository_path: &VfsPath) -> Result<()> { let lock_id = Uuid::new_v4(); - let lock = Lock::lock(&self.index_directory()?)?; - if self.index_file_path().exists() { - let index = Index::load_from_file(&Index::index_file_path_for_repository_path(&self.index_directory()?))?; + let lock = Lock::lock(repository_path)?; + + let index_file_path = &Index::index_file_path_for_repository_path(repository_path)?; + if index_file_path.exists() { + let index = Index::load_from_file(&Index::index_file_path_for_repository_path(repository_path)?)?; self.merge_items_by_file_id(index.items_by_file_id); self.merge_newest_items(index.newest_items_by_source_path); self.version = max(self.version, index.version); } self.version = self.version.next(); - self.write_index_to_file(&self.index_file_path())?; + self.write_index_to_file(index_file_path)?; lock.release()?; - log::debug!("[{}] saved index 
version {} with lock id {}", getpid(), self.version, lock_id,); + log::debug!( + "[{}] saved index version {} with lock id {} to {}; {} items", + getpid(), + self.version, + lock_id, + index_file_path.as_str(), + self.newest_items_by_source_path.len() + ); Ok(()) } - fn write_index_to_file(&mut self, path: &Path) -> Result<()> { - fs::create_dir_all( - path.parent() - .ok_or_else(|| anyhow!("cannot compute parent path for {}", path.to_string_lossy()))?, - ) - .context("create index directory")?; + fn write_index_to_file(&mut self, index_file_path: &VfsPath) -> Result<()> { + let parent = index_file_path.parent(); + match parent { + None => Err(anyhow!(format!("cannot get parent for {}", index_file_path.as_str()))), + Some(parent) => Ok(parent + .create_dir_all() + .context(format!("create index directory at {}", index_file_path.as_str()))?), + }?; - let file = AtomicFile::new(&path, AllowOverwrite); - - file.write(|f| { - let contents = serde_json::to_string(&self)?; - f.write_all(contents.as_bytes()) - }) - .context("writing index to disk")?; - - Ok(()) + let contents; + { + let mut file = index_file_path.create_file()?; + contents = serde_json::to_string(&self)?; + file.write_all(contents.as_bytes()).context("writing index to disk")?; + file.flush()?; + } + let readback = index_file_path.read_to_string()?; + if readback != contents { + Err(anyhow!("index readback incorrect")) + } else { + Ok(()) + } } - fn index_file_path(&self) -> PathBuf { - Path::new(&self.index_path).to_path_buf() - } + fn load_from_file(index_file_path: &VfsPath) -> Result { + let index_text = index_file_path + .read_to_string() + .context(format!("reading index file contents from {}", index_file_path.as_str()))?; - fn load_from_file>(index_file_path: T) -> Result { - let path_text = format!("{}", index_file_path.as_ref().to_string_lossy()); - let index_text = - fs::read_to_string(path_text.clone()).context(format!("reading index file contents from {}", path_text))?; - - let mut index: Index 
= serde_json::from_str(&index_text).context(format!("cannot read index from: {}", index_text))?; - index.index_path = path_text; + let index: Index = serde_json::from_str(&index_text).context(format!("cannot read index from: {}", index_text))?; Ok(index) } @@ -97,16 +106,8 @@ impl Index { self.items_by_file_id.extend(old_items_by_file_id); } - fn index_file_path_for_repository_path(path: &Path) -> PathBuf { - path.join("index") - } - - fn index_directory(&self) -> Result<PathBuf> { - Ok(self - .index_file_path() - .parent() - .ok_or_else(|| anyhow!("cannot compute parent path for {}", self.index_file_path().to_string_lossy()))? - .to_path_buf()) + fn index_file_path_for_repository_path(path: &VfsPath) -> Result<VfsPath> { + Ok(path.join("index")?) + } } @@ -114,14 +115,15 @@ mod must { use crate::index::Index; use anyhow::Result; + use vfs::{MemoryFS, VfsPath}; #[test] fn have_version_increased_when_saved() -> Result<()> { - let temp_dir = tempfile::tempdir()?; - let mut index = Index::new(&temp_dir.into_path()); + let temp_dir: VfsPath = MemoryFS::new().into(); + let mut index = Index::new()?; let old_version = index.version; - index.save()?; + index.save(&temp_dir)?; let new_version = index.version; diff --git a/src/index/item.rs index 92d9209..eb3fe3f 100644 --- a/src/index/item.rs +++ b/src/index/item.rs @@ -50,8 +50,8 @@ impl IndexItem { impl From<RepositoryItem> for IndexItem { fn from(i: RepositoryItem) -> Self { IndexItem { - relative_path: i.relative_path().to_string_lossy().to_string(), - original_source_path: i.original_source_path().to_string_lossy().to_string(), + relative_path: i.relative_path().to_string(), + original_source_path: i.original_source_path().to_string(), id: i.id().clone(), version: *i.version(), } diff --git a/src/index/lock.rs index ce6d727..96d4c21 100644 --- a/src/index/lock.rs +++ b/src/index/lock.rs @@ -1,26 +1,22 @@ use anyhow::Result; -use anyhow::*; -use atomicwrites::{AtomicFile, DisallowOverwrite}; -use 
glob::{glob, Paths}; use std::io::Write; -use std::path::{Path, PathBuf}; use uuid::Uuid; +use vfs::VfsPath; use rand::{rngs::OsRng, RngCore}; use std::{thread, time}; pub struct Lock { - path: PathBuf, + path: VfsPath, } impl Lock { - pub fn lock>(index_directory: T) -> Result { - let index_directory = index_directory.as_ref(); + pub fn lock(index_directory: &VfsPath) -> Result { let mut buffer = [0u8; 16]; OsRng.fill_bytes(&mut buffer); let id = Uuid::from_bytes(buffer); Lock::wait_to_have_sole_lock(id, index_directory)?; - let path = Lock::lock_file_path(index_directory, id); + let path = Lock::lock_file_path(index_directory, id)?; Ok(Lock { path }) } @@ -31,16 +27,16 @@ impl Lock { fn delete_lock_file(&self) -> Result<()> { if self.path.exists() { - std::fs::remove_file(&self.path)?; + self.path.remove_file()?; } Ok(()) } - fn wait_to_have_sole_lock(lock_id: Uuid, index_directory: &Path) -> Result<()> { + fn wait_to_have_sole_lock(lock_id: Uuid, index_directory: &VfsPath) -> Result<()> { Lock::create_lock_file(lock_id, index_directory)?; while !Lock::sole_lock(lock_id, index_directory)? 
{ - let path = Lock::lock_file_path(index_directory, lock_id); - std::fs::remove_file(path)?; + let path = Lock::lock_file_path(index_directory, lock_id)?; + path.remove_file()?; let sleep_duration = time::Duration::from_millis((OsRng.next_u32() % 256).into()); thread::sleep(sleep_duration); Lock::create_lock_file(lock_id, index_directory)?; @@ -48,13 +44,12 @@ impl Lock { Ok(()) } - fn sole_lock(lock_id: Uuid, index_directory: &Path) -> Result<bool> { - let my_lock_file_path = Lock::lock_file_path(index_directory, lock_id); + fn sole_lock(lock_id: Uuid, index_directory: &VfsPath) -> Result<bool> { + let my_lock_file_path = Lock::lock_file_path(index_directory, lock_id)?; let locks = Lock::all_locks(index_directory)?; let mut only_mine = true; for path in locks { - let path = path?; - if path.to_string_lossy() != my_lock_file_path.to_string_lossy() { + if path != my_lock_file_path { only_mine = false; break; } @@ -62,27 +57,25 @@ impl Lock { Ok(only_mine) } - fn all_locks(index_directory: &Path) -> Result<Paths> { - let locks_glob = Lock::locks_glob(index_directory); - Ok(glob(&locks_glob)?) + fn all_locks(index_directory: &VfsPath) -> Result<Vec<VfsPath>> { + Ok(index_directory + .read_dir()? + .into_iter() + .filter(|f| f.filename().ends_with(".lock")) + .collect()) } - fn create_lock_file(lock_id: Uuid, index_directory: &Path) -> Result<()> { - let lock_file_path = Lock::lock_file_path(index_directory, lock_id); - let file = AtomicFile::new(lock_file_path, DisallowOverwrite); - match file.write(|f| f.write_all(lock_id.to_hyphenated().to_string().as_bytes())) { - Ok(_) => Ok(()), - Err(e) => Err(anyhow!("error acquiring lock: {}", e)), - } + fn create_lock_file(lock_id: Uuid, index_directory: &VfsPath) -> Result<()> { + let lock_file_path = Lock::lock_file_path(index_directory, lock_id)?; + let mut file = lock_file_path.create_file()?; + let lock_id_text = lock_id.to_hyphenated().to_string(); + let lock_id_bytes = lock_id_text.as_bytes(); + Ok(file.write_all(lock_id_bytes)?) 
} - fn lock_file_path(path: &Path, lock_id: Uuid) -> PathBuf { - let path_text = &format!("{}/{}.lock", path.to_string_lossy(), lock_id); - Path::new(path_text).to_path_buf() - } - - fn locks_glob(path: &Path) -> String { - format!("{}/*.lock", path.to_string_lossy()) + fn lock_file_path(path: &VfsPath, lock_id: Uuid) -> Result { + let file_name = format!("{}.lock", lock_id); + Ok(path.join(&file_name)?) } } @@ -96,19 +89,17 @@ impl Drop for Lock { mod must { use super::Lock; use anyhow::Result; - use std::{fs, io}; + use vfs::{MemoryFS, VfsPath}; #[test] fn be_released_when_dropped() -> Result<()> { - let temp_dir = tempfile::tempdir()?; + let temp_dir: VfsPath = MemoryFS::new().into(); { - let _lock = Lock::lock(&temp_dir.path()); + let _lock = Lock::lock(&temp_dir); } - let entries = fs::read_dir(temp_dir.into_path())? - .map(|res| res.map(|e| e.path())) - .collect::, io::Error>>()?; + let entries = temp_dir.read_dir()?.count(); - assert_eq!(entries.len(), 0); + assert_eq!(entries, 0); Ok(()) } } diff --git a/src/index/mod.rs b/src/index/mod.rs index a8eeb10..3fd3521 100644 --- a/src/index/mod.rs +++ b/src/index/mod.rs @@ -1,52 +1,44 @@ use std::collections::hash_map::Iter; use std::collections::HashMap; -use std::path::Path; use serde::{Deserialize, Serialize}; +use vfs::VfsPath; use crate::index::item::IndexItem; -use crate::repository::{item::RepositoryItem, ItemId}; +use crate::repository::ItemId; use crate::version::Version; use anyhow::Result; -use anyhow::*; mod io; -mod item; +pub mod item; mod lock; -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Debug)] pub struct Index { newest_items_by_source_path: HashMap, items_by_file_id: HashMap, - index_path: String, - repository_path: String, version: Version, } impl Index { - pub fn new>(repository_path: T) -> Self { - let repository_path = repository_path.as_ref(); - Index { + pub fn new() -> Result { + Ok(Index { newest_items_by_source_path: Default::default(), items_by_file_id: 
Default::default(), - index_path: repository_path.join("index").to_string_lossy().to_string(), - repository_path: repository_path.to_string_lossy().to_string(), version: Version::default(), - } + }) } - pub fn remember, R: AsRef>(&mut self, original_source_path: S, relative_path: R, id: ItemId) { - let original_source_path = original_source_path.as_ref(); - let relative_path = relative_path.as_ref(); + pub fn remember(&mut self, original_source_path: &VfsPath, relative_path: &str, id: ItemId) { let item = if let Some(old) = self .newest_items_by_source_path - .get(&original_source_path.to_string_lossy().to_string()) + .get(&original_source_path.as_str().to_string()) { - old.next_version(id, relative_path.to_string_lossy().to_string()) + old.next_version(id, relative_path.to_string()) } else { IndexItem::from( - original_source_path.to_string_lossy().to_string(), - relative_path.to_string_lossy().to_string(), + original_source_path.as_str().to_string(), + relative_path.to_string(), id, Version::default(), ) @@ -54,34 +46,11 @@ impl Index { self.items_by_file_id.insert(item.id(), item.clone()); self.newest_items_by_source_path - .insert(original_source_path.to_string_lossy().to_string(), item); + .insert(original_source_path.as_str().to_string(), item); } - pub fn repository_item(&self, i: &IndexItem) -> RepositoryItem { - let index_item = i.clone(); - let relative_path = Path::new(index_item.relative_path()); - let repository_path = Path::new(&self.repository_path); - let original_source_path = Path::new(index_item.original_source_path()); - let absolute_path = repository_path.join(relative_path); - let absolute_path = absolute_path.as_path(); - RepositoryItem::from( - original_source_path, - absolute_path, - relative_path, - index_item.id(), - index_item.version(), - ) - } - - pub fn newest_item_by_source_path>(&self, path: T) -> Result> { - let path = path.as_ref(); - if !path.is_absolute() { - return Err(anyhow!("repository path not absolute")); - } - Ok(self - 
.newest_items_by_source_path - .get(&path.to_string_lossy().to_string()) - .cloned()) + pub fn newest_item_by_source_path(&self, path: &VfsPath) -> Result<Option<IndexItem>> { + Ok(self.newest_items_by_source_path.get(&path.as_str().to_string()).cloned()) } pub fn item_by_id(&self, id: &ItemId) -> Result<Option<IndexItem>> { @@ -95,6 +64,7 @@ impl Index { } } +#[derive(Debug)] pub struct IndexItemIterator<'a> { iterator: Iter<'a, String, IndexItem>, } diff --git a/src/repository/item.rs index 2370c39..01a8ecf 100644 --- a/src/repository/item.rs +++ b/src/repository/item.rs @@ -1,58 +1,67 @@ use crate::{repository::ItemId, version::Version}; use anyhow::Result; use anyhow::*; +use nix::unistd::getpid; +use std::fmt; use std::fmt::{Display, Formatter}; use std::path::Path; -use std::{fmt, fs}; +use vfs::VfsPath; -#[derive(Clone, Debug, PartialOrd, PartialEq, Ord, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct RepositoryItem { - relative_path: Box<Path>, - absolute_path: Box<Path>, - original_source_path: Box<Path>, + relative_path: String, + absolute_path: VfsPath, + original_source_path: String, id: ItemId, version: Version, } +impl PartialOrd for RepositoryItem { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + self.id.partial_cmp(&other.id) + } +} + impl RepositoryItem { - pub fn from(original_source_path: &Path, absolute_path: &Path, relative_path: &Path, id: ItemId, version: Version) -> Self { + pub fn from( + original_source_path: &str, + absolute_path: &VfsPath, + relative_path: &str, + id: ItemId, + version: Version, + ) -> Self { RepositoryItem { - relative_path: Box::from(relative_path), - absolute_path: Box::from(absolute_path), - original_source_path: Box::from(original_source_path), + relative_path: relative_path.to_string(), + absolute_path: absolute_path.clone(), + original_source_path: original_source_path.to_string(), id, version, } } - pub fn save(&self, save_to: &Path) -> Result<()> { - if !save_to.is_absolute() { - return Err(anyhow!("path to store not 
absolute")); - } - - let target_path = save_to.join(&self.original_source_path.strip_prefix("/")?); - if !target_path.is_absolute() { - return Err(anyhow!("path to store not absolute")); - } + pub fn save(&self, save_to: &VfsPath) -> Result<()> { + let original_source_path = Path::new(self.original_source_path()); + let source_path_relative = original_source_path.strip_prefix("/")?; + let source_path_relative = source_path_relative.to_string_lossy(); + let target_path = save_to.join(&source_path_relative)?; let parent = target_path .parent() - .ok_or_else(|| anyhow!("cannot compute parent path for {}", &target_path.to_string_lossy()))?; - if !parent.exists() { - fs::create_dir_all(parent)?; - } + .ok_or_else(|| anyhow!("cannot compute parent path for {}", &target_path.as_str()))?; + log::debug!("[{}] saving data to {}", getpid(), target_path.as_str()); + parent.create_dir_all()?; if !self.absolute_path.exists() { return Err(anyhow!("corrupted repository")); } - fs::copy(&self.absolute_path, &target_path)?; + self.absolute_path.copy_file(&target_path)?; Ok(()) } - pub fn relative_path(&self) -> &Path { + pub fn relative_path(&self) -> &str { &self.relative_path } - pub fn original_source_path(&self) -> &Path { + pub fn original_source_path(&self) -> &str { &self.original_source_path } @@ -67,11 +76,6 @@ impl RepositoryItem { impl Display for RepositoryItem { fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!( - f, - "'{}' : {}", - self.original_source_path().to_string_lossy(), - hex::encode(self.id()) - ) + write!(f, "'{}' : {}", self.original_source_path(), hex::encode(self.id())) } } diff --git a/src/repository/mod.rs b/src/repository/mod.rs index cd99d8f..101121b 100644 --- a/src/repository/mod.rs +++ b/src/repository/mod.rs @@ -1,10 +1,9 @@ pub mod item; -use std::fmt::Formatter; -use std::fs::File; +use std::fmt::{Debug, Formatter}; use std::io::BufReader; -use std::path::{Path, PathBuf}; -use std::{fmt, fs, io}; +use std::path::Path; +use std::{fmt, 
io}; use crate::index::{Index, IndexItemIterator}; use anyhow::Result; @@ -13,28 +12,31 @@ use item::RepositoryItem; use serde::{Deserialize, Serialize}; use sha2::Digest; use sha2::Sha512; -use walkdir::WalkDir; +use vfs::{VfsFileType, VfsPath}; /// represents a place where backup is stored an can be restored from. /// right now only on-disk directory storage is supported /// repository always knows the newest version of the index and is responsible for syncing the index to disk /// and making sure that different threads can access index in parallel +#[derive(Debug)] pub struct Repository { - /// absolute path to where the repository is stored on disk - path: PathBuf, + /// path to where the repository is stored on disk + path: VfsPath, index: Index, } const DATA_DIR_NAME: &str = "data"; -#[derive(Clone, Debug, PartialOrd, PartialEq, Ord, Eq, Serialize, Deserialize, Hash)] +#[derive(Clone, PartialOrd, PartialEq, Ord, Eq, Serialize, Deserialize, Hash)] pub struct ItemId(#[serde(with = "base64")] Vec); +#[derive(Debug)] pub struct RepositoryItemIterator<'a> { + repository: &'a Repository, iterator: IndexItemIterator<'a>, - index: &'a Index, } +//TODO: move to serializers::base64 mod base64 { use ::base64; use serde::{de, Deserialize, Deserializer, Serializer}; @@ -59,7 +61,11 @@ impl<'a> Iterator for RepositoryItemIterator<'a> { type Item = RepositoryItem; fn next(&mut self) -> Option { - self.iterator.next().map(|i| self.index.repository_item(&i)) + let item = self.iterator.next(); + match item { + None => None, + Some(item) => self.repository.repository_item(&item).ok(), + } } } @@ -81,91 +87,115 @@ impl fmt::Display for ItemId { } } +impl Debug for ItemId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}", hex::encode(self)) + } +} + impl<'a> Repository { - pub fn init>(path: T) -> Result<()> { - let mut index = Index::new(path.as_ref()); - index.save()?; - Ok(()) + pub fn init(path: &VfsPath) -> Result { + path.create_dir_all()?; + let 
mut index = Index::new()?; + index.save(path)?; + let repository = Repository::open(path)?; + repository.data_dir()?.create_dir_all()?; + Ok(repository) } - pub fn open>(path: T) -> Result { - let path = path.as_ref(); - if !path.is_absolute() { - return Err(anyhow!("path to repository not absolute")); - } - + pub fn open(path: &VfsPath) -> Result { let index = Index::load(path)?; - let mut repository_path = PathBuf::new(); - repository_path.push(path); - Ok(Repository { - path: repository_path, + let repository = Repository { + path: path.clone(), index, - }) + }; + + Ok(repository) } - pub fn path(&self) -> &Path { + pub fn path(&self) -> &VfsPath { &self.path } pub fn save_index(&mut self) -> Result<()> { - self.index.save() + self.index.save(&self.path) } - pub fn store(&mut self, source_path: &Path) -> Result<()> { - if !source_path.is_absolute() { - return Err(anyhow!("path to store not absolute")); - } + pub fn store(&mut self, source_path: &VfsPath) -> Result<()> { let id = Repository::calculate_id(source_path)?; - let destination_path = self.data_dir(); - let destination_path = destination_path.join(id.to_string()); - let destination_path = Path::new(&destination_path); + let destination = self.data_dir()?; + let destination = destination.join(&id.to_string())?; - if source_path.is_file() { - let parent = destination_path - .parent() - .ok_or_else(|| anyhow!("cannot compute parent path for {}", &destination_path.to_string_lossy()))?; - fs::create_dir_all(parent)?; - fs::copy(source_path, destination_path)?; - let relative_path = destination_path.strip_prefix(&self.path)?; - self.index.remember(source_path, relative_path, id); + if source_path.metadata()?.file_type != VfsFileType::File { + return Ok(()); } + let parent = destination + .parent() + .ok_or_else(|| anyhow!("cannot compute parent path for {}", &destination.as_str()))?; + parent.create_dir_all()?; + if !destination.exists() { + source_path.copy_file(&destination)?; + } + let destination_path = 
Path::new(destination.as_str()); + let relative_path = destination_path.strip_prefix(&self.path.as_str())?.to_string_lossy(); + self.index.remember(source_path, &relative_path, id); Ok(()) } - pub fn newest_item_by_source_path(&self, path: &Path) -> Result> { - Ok(self - .index - .newest_item_by_source_path(path)? - .map(|i| self.index.repository_item(&i))) + pub fn newest_item_by_source_path(&self, path: &VfsPath) -> Result> { + let item = self.index.newest_item_by_source_path(path)?; + match item { + None => Ok(None), + Some(item) => Ok(Some(self.repository_item(&item)?)), + } } pub fn item_by_id(&self, id: &ItemId) -> Result> { - self.index.item_by_id(id).map(|i| i.map(|i| self.index.repository_item(&i))) + let item = self.index.item_by_id(id)?; + match item { + None => Ok(None), + Some(item) => Ok(Some(self.repository_item(&item)?)), + } } pub fn newest_items(&self) -> RepositoryItemIterator { RepositoryItemIterator { + repository: &self, iterator: self.index.newest_items(), - index: &self.index, } } + pub fn repository_item(&self, i: &crate::index::item::IndexItem) -> Result { + let index_item = i.clone(); + let relative_path = index_item.relative_path(); + let repository_path = self.path(); + let original_source_path = index_item.original_source_path(); + let absolute_path = repository_path.join(relative_path)?; + Ok(RepositoryItem::from( + &original_source_path, + &absolute_path, + relative_path, + index_item.id(), + index_item.version(), + )) + } + pub fn data_weight(&self) -> Result { - let total_size = WalkDir::new(self.data_dir()) - .into_iter() + let walkdir = self.data_dir()?.walk_dir()?; + let total_size = walkdir .filter_map(|entry| entry.ok()) .filter_map(|entry| entry.metadata().ok()) - .filter(|metadata| metadata.is_file()) - .fold(0, |acc, m| acc + m.len()); + .filter(|metadata| metadata.file_type == VfsFileType::File) + .fold(0, |acc, m| acc + m.len); Ok(total_size) } - fn data_dir(&self) -> PathBuf { - self.path().join(DATA_DIR_NAME) + fn 
data_dir(&self) -> Result { + Ok(self.path().join(DATA_DIR_NAME)?) } - fn calculate_id(source_path: &Path) -> Result { - let source_file = File::open(source_path)?; + fn calculate_id(source_path: &VfsPath) -> Result { + let source_file = source_path.open_file()?; let mut reader = BufReader::new(source_file); let mut hasher = Sha512::new(); @@ -180,23 +210,23 @@ mod must { use super::Repository; use crate::test::source::TestSource; use anyhow::Result; - use tempfile::tempdir; + use vfs::MemoryFS; #[test] fn have_size_equal_to_sum_of_sizes_of_backed_up_files() -> Result<()> { let file_size1 = 13; let file_size2 = 27; let source = TestSource::new()?; - let repository_path = tempdir()?.into_path(); + let repository_path = MemoryFS::new().into(); Repository::init(&repository_path)?; let mut backup_repository = Repository::open(&repository_path)?; source.write_random_bytes_to_file("file1", file_size1)?; - backup_repository.store(&source.file_path("file1"))?; + backup_repository.store(&source.file_path("file1")?)?; source.write_random_bytes_to_file("file2", file_size2)?; - backup_repository.store(&source.file_path("file2"))?; + backup_repository.store(&source.file_path("file2")?)?; assert_eq!(file_size1 + file_size2, backup_repository.data_weight()?); Ok(()) diff --git a/src/restore.rs b/src/restore.rs index 4078767..1b699bf 100644 --- a/src/restore.rs +++ b/src/restore.rs @@ -1,24 +1,20 @@ -use std::path::Path; - use crate::repository::{item::RepositoryItem, Repository}; use anyhow::Result; -use anyhow::*; +use vfs::VfsPath; pub struct Engine<'a> { repository: &'a mut Repository, - target_path: &'a Path, + target_path: &'a VfsPath, } impl<'a> Engine<'a> { - pub fn new(repository: &'a mut Repository, target_path: &'a Path) -> Result { - if !target_path.is_absolute() { - return Err(anyhow!("path to store not absolute")); - } + pub fn new(repository: &'a mut Repository, target_path: &'a VfsPath) -> Result { Ok(Engine { repository, target_path }) } pub fn restore_all(&mut 
self) -> Result<()> { - for item in self.repository.newest_items() { + let newest_items = self.repository.newest_items(); + for item in newest_items { self.restore(&item)?; } self.repository.save_index()?; diff --git a/src/test/assertions.rs b/src/test/assertions.rs index 1350bb5..a2c6184 100644 --- a/src/test/assertions.rs +++ b/src/test/assertions.rs @@ -1,166 +1,180 @@ -use std::fs::File; -use std::io::Read; -use std::path::Path; +pub mod in_memory { + use std::path::Path; -use tempfile::tempdir; -use walkdir::WalkDir; - -use super::source::TestSource; -use crate::repository::{item::RepositoryItem, ItemId, Repository}; -use crate::{backup, restore}; -use anyhow::Result; - -pub fn assert_same_after_restore(source_path: &Path) -> Result<()> { - let repository_path = tempdir().unwrap().into_path(); - let restore_target = tempdir().unwrap().into_path(); - - assert_ne!(source_path, repository_path); - assert_ne!(repository_path, restore_target); - - Repository::init(repository_path.as_path())?; - { - let mut backup_repository = Repository::open(repository_path.as_path())?; - let mut backup_engine = backup::Engine::new(source_path, &mut backup_repository)?; - backup_engine.backup()?; - } - { - let mut restore_repository = Repository::open(repository_path.as_path())?; - let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?; - restore_engine.restore_all()?; - } - - assert_directory_trees_have_same_contents(source_path, restore_target.as_path())?; - Ok(()) -} - -pub fn assert_restored_file_contents(repository_path: &Path, source_file_full_path: &Path, contents: &[u8]) -> Result<()> { - let mut restore_repository = Repository::open(repository_path)?; - let item = restore_repository.newest_item_by_source_path(&source_file_full_path)?; - let restore_target = tempdir().unwrap(); - let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?; - - restore_engine.restore(&item.unwrap())?; - let restored_file_path 
= restore_target.path().join(source_file_full_path.strip_prefix("/")?); - assert_target_file_contents(&restored_file_path, contents) -} -pub fn assert_restored_file_byte_contents(repository_path: &Path, source_file_full_path: &Path, contents: &[u8]) -> Result<()> { - let mut restore_repository = Repository::open(repository_path)?; - let item = restore_repository.newest_item_by_source_path(&source_file_full_path)?; - let restore_target = tempdir().unwrap(); - let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?; - - restore_engine.restore(&item.unwrap())?; - let restored_file_path = restore_target.path().join(source_file_full_path.strip_prefix("/")?); - assert_target_file_contents(&restored_file_path, contents) -} - -pub fn assert_restored_from_version_has_contents( - repository_path: &Path, - source_file_full_path: &Path, - old_contents: &[u8], - old_id: &ItemId, -) -> Result<()> { - let mut restore_repository = Repository::open(repository_path)?; - let old_item = restore_repository.item_by_id(&old_id)?; - let restore_target = tempdir().unwrap(); - let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?; - restore_engine.restore(&old_item.unwrap())?; - let restored_file_path = restore_target.path().join(source_file_full_path.strip_prefix("/")?); - assert_target_file_contents(&restored_file_path, old_contents) -} - -pub fn newest_item(repository_path: &Path, source_file_full_path: &Path) -> Result { - let item = { - let reading_repository = Repository::open(repository_path)?; - let item = reading_repository.newest_item_by_source_path(&source_file_full_path)?; - assert!(item.is_some()); - item.unwrap() + use crate::{ + backup, + repository::{item::RepositoryItem, ItemId, Repository}, + restore, + test::source::TestSource, }; - Ok(item) -} + use anyhow::Result; + use vfs::{MemoryFS, VfsFileType, VfsPath}; -pub fn restore_all_from_reloaded_repository(repository_path: &Path, restore_target: 
&Path) -> Result<()> { - { + use rand::Rng; + + pub fn random_in_memory_path(prefix: &str) -> Result { + let path: VfsPath = MemoryFS::new().into(); + let path = path.join(&format!("{}-{}", prefix, rand::thread_rng().gen::()))?; + Ok(path) + } + + pub fn assert_same_after_restore(source_path: &VfsPath) -> Result<()> { + let repository_path: VfsPath = random_in_memory_path("repository")?; + let restore_target: VfsPath = random_in_memory_path("target")?; + + assert_ne!(source_path, &repository_path); + assert_ne!(repository_path, restore_target); + + Repository::init(&repository_path)?; + { + let mut backup_repository = Repository::open(&repository_path)?; + let mut backup_engine = backup::Engine::new(source_path, &mut backup_repository)?; + backup_engine.backup()?; + } + { + let mut restore_repository = Repository::open(&repository_path)?; + + let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?; + restore_engine.restore_all()?; + } + + assert_directory_trees_have_same_contents(source_path, &restore_target)?; + Ok(()) + } + + pub fn assert_restored_file_contents( + repository_path: &VfsPath, + source_file_full_path: &VfsPath, + contents: &[u8], + ) -> Result<()> { let mut restore_repository = Repository::open(repository_path)?; - let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?; - restore_engine.restore_all()?; - Ok(()) + let item = restore_repository.newest_item_by_source_path(&source_file_full_path)?; + let restore_target = random_in_memory_path("target")?; + let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?; + + restore_engine.restore(&item.unwrap())?; + let source_file_relative_path = Path::new(source_file_full_path.as_str()).strip_prefix("/")?; + let restored_file_path = restore_target.join(&source_file_relative_path.to_string_lossy())?; + assert_target_file_contents(&restored_file_path, contents) } -} -pub fn backup_file_with_text_contents( - source: 
&TestSource, - repository_path: &Path, - source_file_relative_path: &str, - contents: &str, -) -> Result<()> { - { - backup_file_with_byte_contents(source, repository_path, source_file_relative_path, contents.as_bytes()) + pub fn assert_restored_from_version_has_contents( + repository_path: &VfsPath, + source_file_full_path: &VfsPath, + old_contents: &[u8], + old_id: &ItemId, + ) -> Result<()> { + let mut restore_repository = Repository::open(repository_path)?; + let old_item = restore_repository.item_by_id(&old_id)?; + let restore_target = random_in_memory_path("target")?; + let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?; + restore_engine.restore(&old_item.unwrap())?; + let source_file_relative_path = Path::new(source_file_full_path.as_str()).strip_prefix("/")?; + let restored_file_path = restore_target.join(&source_file_relative_path.to_string_lossy())?; + assert_target_file_contents(&restored_file_path, old_contents) } -} -pub fn backup_file_with_byte_contents( - source: &TestSource, - repository_path: &Path, - source_file_relative_path: &str, - contents: &[u8], -) -> Result<()> { - { - let mut backup_repository = Repository::open(repository_path)?; - let mut backup_engine = backup::Engine::new(source.path(), &mut backup_repository)?; - source.write_bytes_to_file(source_file_relative_path, contents).unwrap(); - backup_engine.backup()?; - Ok(()) + pub fn newest_item(repository_path: &VfsPath, source_file_full_path: &VfsPath) -> Result { + let item = { + let reading_repository = Repository::open(repository_path)?; + let item = reading_repository.newest_item_by_source_path(&source_file_full_path)?; + assert!(item.is_some()); + item.unwrap() + }; + Ok(item) } -} -pub fn data_weight(repository_path: &Path) -> Result { - { - let repository = Repository::open(repository_path)?; - Ok(repository.data_weight()?) 
- } -} - -fn assert_directory_trees_have_same_contents(left: &Path, right: &Path) -> Result<()> { - let left_files = get_sorted_files_recursively(left)?; - let right_files = get_sorted_files_recursively(right)?; - - let pairs = left_files.iter().zip(right_files); - for (l, r) in pairs { - assert_eq!(l.file_name(), r.file_name()); - let mut fl = File::open(l).unwrap(); - let mut fr = File::open(r).unwrap(); - let mut bl = vec![]; - let mut br = vec![]; - fl.read_to_end(&mut bl).unwrap(); - fr.read_to_end(&mut br).unwrap(); - assert_eq!(bl, br); - } - Ok(()) -} - -pub fn get_sorted_files_recursively>(path: T) -> Result>> { - let walker = WalkDir::new(path.as_ref()).sort_by(|a, b| a.file_name().cmp(b.file_name())); - - let mut result = vec![]; - - for maybe_entry in walker { - let entry = maybe_entry?; - if entry.path() == path.as_ref() { - continue; - } - if entry.path().is_file() { - result.push(Box::from(entry.path())); + pub fn restore_all_from_reloaded_repository(repository_path: &VfsPath, restore_target: &VfsPath) -> Result<()> { + { + let mut restore_repository = Repository::open(repository_path)?; + let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?; + restore_engine.restore_all()?; + Ok(()) } } - Ok(result) -} + pub fn backup_file_with_text_contents( + source: &TestSource, + repository_path: &VfsPath, + source_file_relative_path: &str, + contents: &str, + ) -> Result<()> { + { + backup_file_with_byte_contents(source, repository_path, source_file_relative_path, contents.as_bytes()) + } + } -fn assert_target_file_contents(restored_path: &Path, expected_contents: &[u8]) -> Result<()> { - let mut actual_contents = vec![]; - assert!(restored_path.exists(), "Expected '{}' to be there", restored_path.display()); - File::open(restored_path)?.read_to_end(&mut actual_contents)?; - assert_eq!(expected_contents, actual_contents); - Ok(()) + pub fn backup_file_with_byte_contents( + source: &TestSource, + repository_path: &VfsPath, + 
source_file_relative_path: &str, + contents: &[u8], + ) -> Result<()> { + { + let mut backup_repository = Repository::open(repository_path)?; + + let mut backup_engine = backup::Engine::new(source.path(), &mut backup_repository)?; + source.write_bytes_to_file(source_file_relative_path, contents).unwrap(); + backup_engine.backup()?; + Ok(()) + } + } + + pub fn data_weight(repository_path: &VfsPath) -> Result { + { + let repository = Repository::open(repository_path)?; + Ok(repository.data_weight()?) + } + } + + fn assert_directory_trees_have_same_contents(left: &VfsPath, right: &VfsPath) -> Result<()> { + let left_files = get_sorted_files_recursively(left)?; + let right_files = get_sorted_files_recursively(right)?; + + let pairs = left_files.iter().zip(right_files); + for (l, r) in pairs { + assert_eq!(l.filename(), r.filename()); + let mut fl = l.open_file()?; + let mut fr = r.open_file()?; + let mut bl = vec![]; + let mut br = vec![]; + fl.read_to_end(&mut bl).unwrap(); + fr.read_to_end(&mut br).unwrap(); + assert_eq!(bl, br); + } + Ok(()) + } + + pub fn get_sorted_files_recursively(path: &VfsPath) -> Result> { + assert!( + path.exists(), + "[get_sorted_files_recursively] invoked on a path that does not exist: {:?}", + path + ); + let walker = path.walk_dir()?; + + let mut result = vec![]; + + for maybe_entry in walker { + let entry = &maybe_entry?; + if entry == path { + continue; + } + if entry.metadata()?.file_type == VfsFileType::File { + result.push(entry.clone()); + } + } + + result.sort_by(|a, b| a.filename().cmp(&b.filename())); + + Ok(result) + } + + fn assert_target_file_contents(restored_path: &VfsPath, expected_contents: &[u8]) -> Result<()> { + let mut actual_contents = vec![]; + assert!(restored_path.exists(), "Expected '{:?}' to be there", restored_path); + restored_path.open_file()?.read_to_end(&mut actual_contents)?; + assert_eq!(expected_contents, actual_contents); + Ok(()) + } } diff --git a/src/test/source.rs b/src/test/source.rs index 
999b195..6cd3c9b 100644 --- a/src/test/source.rs +++ b/src/test/source.rs @@ -1,45 +1,54 @@ -use std::fs::File; -use std::io::Error; use std::io::Write; -use std::path::Path; -use std::path::PathBuf; -use tempfile::tempdir; -use tempfile::TempDir; +use anyhow::Result; +use vfs::VfsPath; + +use super::assertions::in_memory::random_in_memory_path; pub struct TestSource { - directory: TempDir, + directory: VfsPath, } impl TestSource { - pub fn new() -> Result { - Ok(Self { directory: tempdir()? }) + pub fn new() -> Result { + let path: VfsPath = random_in_memory_path("testsource")?; + path.create_dir_all()?; + Ok(Self { directory: path }) } - pub fn write_bytes_to_file(&self, filename: &str, bytes: &[u8]) -> Result<(), Error> { - let path = self.file_path(filename); - Ok(File::create(path)?.write_all(bytes)?) + pub fn write_bytes_to_file(&self, filename: &str, bytes: &[u8]) -> Result<()> { + let path = self.file_path(filename)?; + let mut file = path.create_file()?; + file.write_all(bytes)?; + dbg!(format!("wrote bytes under {}", filename)); + Ok(()) } - pub fn write_text_to_file(&self, filename: &str, text: &str) -> Result<(), Error> { + pub fn write_text_to_file(&self, filename: &str, text: &str) -> Result<()> { self.write_bytes_to_file(filename, text.as_bytes()) } - pub fn write_random_bytes_to_file(&self, filename: &str, size: u64) -> Result<(), Error> { + pub fn write_random_bytes_to_file(&self, filename: &str, size: u64) -> Result<()> { let random_bytes: Vec = (0..size).map(|_| rand::random::()).collect(); self.write_bytes_to_file(filename, &random_bytes)?; Ok(()) } - pub fn path(&self) -> &Path { - self.directory.path() + pub fn path(&self) -> &VfsPath { + &self.directory } - pub fn file_path(&self, filename: &str) -> PathBuf { - self.directory.path().join(filename) + pub fn file_path(&self, filename: &str) -> Result { + let file_path = self.directory.join(filename)?; + Ok(file_path) } } +impl Drop for TestSource { + fn drop(&mut self) { + let _ = 
self.path().remove_dir_all(); + } +} #[cfg(test)] mod must { use super::TestSource; @@ -51,7 +60,7 @@ mod must { { let source = TestSource::new()?; source.write_random_bytes_to_file("somefile", 1)?; - path = source.path().to_path_buf(); + path = source.path().clone(); } assert!(!path.exists()); diff --git a/tests/concurrency_tests.rs b/tests/concurrency_tests.rs index 40dc114..6f29184 100644 --- a/tests/concurrency_tests.rs +++ b/tests/concurrency_tests.rs @@ -1,50 +1,65 @@ #[cfg(test)] mod must { - use std::fs; - use std::path::Path; - use anyhow::Result; - use bakare::repository::Repository; - use bakare::test::{assertions::*, source::TestSource}; + use bakare::test::source::TestSource; use bakare::{backup, restore}; - use nix::sys::wait::{waitpid, WaitStatus}; + use bakare::{repository::Repository, test::assertions::in_memory::*}; use nix::unistd::{fork, ForkResult}; - use tempfile::tempdir; + use nix::{ + sys::wait::{waitpid, WaitStatus}, + unistd::getpid, + }; + use vfs::{PhysicalFS, VfsPath}; + #[test] fn handle_concurrent_backups() -> Result<()> { setup_logger(); - let repository_path = &tempdir().unwrap().into_path(); - Repository::init(repository_path)?; + + let repository_directory = tempfile::tempdir()?.into_path(); + let repository_path: VfsPath = PhysicalFS::new(repository_directory).into(); + let repository_path = repository_path.join(&format!("repository-{}", getpid()))?; + Repository::init(&repository_path)?; let parallel_backups_number = 16; let files_per_backup_number = 16; let total_number_of_files = parallel_backups_number * files_per_backup_number; - let finished_backup_runs = backup_in_parallel(repository_path, parallel_backups_number, files_per_backup_number)?; - assert_eq!(finished_backup_runs.len(), parallel_backups_number); - let all_restored_files = restore_all(repository_path)?; + let finished_backup_runs = backup_in_parallel(&repository_path, parallel_backups_number, files_per_backup_number)?; + assert_eq!(finished_backup_runs.len(), 
parallel_backups_number); + assert!(data_weight(&repository_path)? > 0); + + let target_directory = tempfile::tempdir()?.into_path(); + let target_path: VfsPath = PhysicalFS::new(target_directory).into(); + let target_path = target_path.join(&format!("target-{}", getpid()))?; + let all_restored_files = restore_all(&repository_path, &target_path)?; assert_eq!(all_restored_files.len(), total_number_of_files); + assert_all_files_in_place(parallel_backups_number, files_per_backup_number, &all_restored_files)?; + Ok(()) + } + + fn assert_all_files_in_place( + parallel_backups_number: usize, + files_per_backup_number: usize, + all_restored_files: &[VfsPath], + ) -> Result<()> { for i in 0..parallel_backups_number { for j in 0..files_per_backup_number { let id = file_id(i, j); - let file = all_restored_files.iter().find(|f| f.ends_with(id.clone())); + let file = all_restored_files.iter().find(|f| f.filename() == id); assert!(file.unwrap().exists(), "file {:?} does not exist", file); - let contents = fs::read_to_string(file.unwrap()).unwrap(); + let contents = file.unwrap().read_to_string()?; assert_eq!(id.to_string(), contents.to_owned()); } } Ok(()) } - fn backup_in_parallel( - repository_path: T, + fn backup_in_parallel( + repository_path: &VfsPath, parallel_backups_number: usize, files_per_backup_number: usize, - ) -> Result> - where - T: AsRef + Sync, - { + ) -> Result> { let task_numbers = (0..parallel_backups_number).collect::>(); let mut child_pids = vec![]; for task_number in &task_numbers { @@ -73,11 +88,8 @@ mod must { Ok(task_numbers) } - fn backup_process(task_number: usize, repository_path: T, files_per_backup_number: usize) -> Result<()> - where - T: AsRef + Sync, - { - let mut repository = Repository::open(repository_path.as_ref())?; + fn backup_process(task_number: usize, repository_path: &VfsPath, files_per_backup_number: usize) -> Result<()> { + let mut repository = Repository::open(repository_path)?; let source = TestSource::new().unwrap(); let mut 
backup_engine = backup::Engine::new(source.path(), &mut repository)?; for i in 0..files_per_backup_number { @@ -88,10 +100,9 @@ mod must { Ok(()) } - fn restore_all>(repository_path: T) -> Result>> { - let restore_target = tempdir().unwrap().into_path(); - let mut restore_repository = Repository::open(repository_path.as_ref())?; - let mut restore_engine = restore::Engine::new(&mut restore_repository, restore_target.as_ref())?; + fn restore_all(repository_path: &VfsPath, restore_target: &VfsPath) -> Result> { + let mut restore_repository = Repository::open(repository_path)?; + let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?; restore_engine.restore_all()?; get_sorted_files_recursively(&restore_target) } diff --git a/tests/deduplication_tests.rs b/tests/deduplication_tests.rs index b34284f..df4de28 100644 --- a/tests/deduplication_tests.rs +++ b/tests/deduplication_tests.rs @@ -1,17 +1,15 @@ #[cfg(test)] mod must { - use tempfile::tempdir; - - use bakare::repository::Repository; - use bakare::test::{assertions::*, source::TestSource}; + use bakare::test::assertions::in_memory::*; + use bakare::{repository::Repository, test::source::TestSource}; use proptest::prelude::*; proptest! 
{ #[test] fn store_duplicated_files_just_once(contents in any::<[u8;3]>()) { let source = TestSource::new().unwrap(); - let repository_path = &tempdir().unwrap().into_path(); - Repository::init(repository_path).unwrap(); + let repository_path = random_in_memory_path("repository").unwrap(); + Repository::init(&repository_path).unwrap(); assert_eq!(data_weight(&repository_path).unwrap(), 0); backup_file_with_byte_contents(&source, &repository_path, "1", &contents).unwrap(); @@ -22,8 +20,8 @@ mod must { let second_weight = data_weight(&repository_path).unwrap(); assert_eq!(first_weight, second_weight); - assert_restored_file_contents(repository_path, &source.file_path("1"), &contents).unwrap(); - assert_restored_file_contents(repository_path, &source.file_path("2"), &contents).unwrap(); + assert_restored_file_contents(&repository_path, &source.file_path("1").unwrap(), &contents).unwrap(); + assert_restored_file_contents(&repository_path, &source.file_path("2").unwrap(), &contents).unwrap(); } } } diff --git a/tests/system_tests.rs b/tests/system_tests.rs index fcd0d87..5f43ac5 100644 --- a/tests/system_tests.rs +++ b/tests/system_tests.rs @@ -1,11 +1,9 @@ #[cfg(test)] mod must { - use tempfile::tempdir; - use anyhow::Result; use bakare::backup; - use bakare::repository::Repository; - use bakare::test::{assertions::*, source::TestSource}; + use bakare::test::assertions::in_memory::*; + use bakare::{repository::Repository, test::source::TestSource}; #[test] fn restore_multiple_files() -> Result<()> { @@ -15,15 +13,17 @@ mod must { source.write_text_to_file("second", "some contents").unwrap(); source.write_text_to_file("third", "some other contents").unwrap(); + dbg!("setup done"); assert_same_after_restore(source.path()) } #[test] fn restore_files_after_reopening_repository() -> Result<()> { let source = TestSource::new().unwrap(); - let repository_path = &tempdir().unwrap().into_path(); - let restore_target = tempdir().unwrap().into_path(); - 
Repository::init(repository_path)?; + let repository_path = random_in_memory_path("repository")?; + let restore_target = random_in_memory_path("target")?; + + Repository::init(&repository_path)?; let source_file_relative_path = "some file path"; let original_contents = "some old contents"; @@ -32,18 +32,18 @@ mod must { restore_all_from_reloaded_repository(&repository_path, &restore_target)?; - let source_file_full_path = &source.file_path(source_file_relative_path); - assert_restored_file_contents(repository_path, source_file_full_path, original_contents.as_bytes()) + let source_file_full_path = &source.file_path(source_file_relative_path)?; + assert_restored_file_contents(&repository_path, source_file_full_path, original_contents.as_bytes()) } #[test] fn restore_older_version_of_file() -> Result<()> { let source = TestSource::new().unwrap(); - let repository_path = tempdir().unwrap().into_path(); - Repository::init(repository_path.as_path())?; + let repository_path = random_in_memory_path("repository")?; + Repository::init(&repository_path)?; let source_file_relative_path = "some path"; - let source_file_full_path = source.file_path(source_file_relative_path); + let source_file_full_path = source.file_path(source_file_relative_path)?; let old_contents = "some old contents"; backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, old_contents)?; @@ -60,11 +60,11 @@ mod must { #[test] fn newer_version_should_be_greater_than_earlier_version() -> Result<()> { let source = TestSource::new().unwrap(); - let repository_path = tempdir().unwrap().into_path(); - Repository::init(repository_path.as_path())?; + let repository_path = random_in_memory_path("repository")?; + Repository::init(&repository_path)?; let source_file_relative_path = "some path"; - let source_file_full_path = source.file_path(source_file_relative_path); + let source_file_full_path = source.file_path(source_file_relative_path)?; backup_file_with_text_contents(&source, 
&repository_path, source_file_relative_path, "old")?; @@ -84,24 +84,25 @@ mod must { #[test] fn restore_latest_version_by_default() -> Result<()> { let source = TestSource::new().unwrap(); - let repository_path = &tempdir().unwrap().into_path(); - Repository::init(repository_path)?; + let repository_path = random_in_memory_path("repository")?; + Repository::init(&repository_path)?; let source_file_relative_path = "some path"; backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "old contents")?; backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "newer contents")?; backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "newest contents")?; - let source_file_full_path = &source.file_path(source_file_relative_path); - assert_restored_file_contents(repository_path, source_file_full_path, b"newest contents") + let source_file_full_path = &source.file_path(source_file_relative_path)?; + assert_restored_file_contents(&repository_path, source_file_full_path, b"newest contents") } #[test] fn forbid_backup_of_paths_within_repository() -> Result<()> { - let repository_path = &tempdir().unwrap().into_path(); - Repository::init(repository_path)?; - let mut repository = Repository::open(repository_path)?; - let error = backup::Engine::new(repository_path, &mut repository); + let repository_path = random_in_memory_path("repository")?; + Repository::init(&repository_path)?; + let mut repository = Repository::open(&repository_path)?; + + let error = backup::Engine::new(&repository_path, &mut repository); assert!(error.is_err()); Ok(()) }