delete vfs

This commit is contained in:
Cyryl Płotnicki 2021-05-16 19:15:07 +01:00
parent 12dce7e676
commit 20373325a9
14 changed files with 275 additions and 270 deletions

10
Cargo.lock generated
View file

@ -198,7 +198,6 @@ dependencies = [
"thiserror",
"two-rusty-forks",
"uuid",
"vfs",
"walkdir",
]
@ -1453,15 +1452,6 @@ version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe"
[[package]]
name = "vfs"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "feb0df0abbe81534013b326c5e3d723a2c9b06c04160fa0f7d8b4d42eb9b7052"
dependencies = [
"thiserror",
]
[[package]]
name = "wait-timeout"
version = "0.2.0"

View file

@ -29,7 +29,6 @@ sha2 = "0.9"
tempfile = "3.2"
thiserror = "1.0"
uuid = { version = "0.8", features = ["v4"] }
vfs = "0.5"
walkdir = "2.3"
[dev-dependencies]

View file

@ -1,40 +1,35 @@
use std::path::Path;
use crate::repository::Repository;
use anyhow::Result;
use anyhow::*;
use vfs::VfsPath;
use walkdir::WalkDir;
pub struct Engine<'a> {
source_path: &'a VfsPath,
source_path: &'a Path,
repository: &'a mut Repository,
}
impl<'a> Engine<'a> {
pub fn new(source_path: &'a VfsPath, repository: &'a mut Repository) -> Result<Self> {
pub fn new(source_path: &'a Path, repository: &'a mut Repository) -> Result<Self> {
let mut ancestors = vec![];
let mut current = Some(source_path.clone());
let mut current = Some(source_path.to_path_buf());
while let Some(path) = current {
ancestors.push(path.clone());
current = path.parent();
ancestors.push(path.to_path_buf());
current = path.parent().map(|p| p.to_path_buf());
}
if ancestors.into_iter().any(|a| &a == repository.path()) {
if ancestors.into_iter().any(|a| a == repository.path()) {
return Err(anyhow!("source same as repository"));
}
Ok(Engine { source_path, repository })
}
pub fn backup(&mut self) -> Result<()> {
let walker = self.source_path.walk_dir()?;
let save_every = 16;
let mut save_counter = 0;
let walker = WalkDir::new(self.source_path);
for maybe_entry in walker {
let entry = maybe_entry?;
if &entry != self.source_path {
self.repository.store(&entry)?;
}
save_counter += 1;
if save_counter == save_every {
save_counter = 0;
self.repository.save_index()?;
if entry.path() != self.source_path {
self.repository.store(&entry.path())?;
}
}
self.repository.save_index()?;

View file

@ -1,5 +1,9 @@
use std::collections::HashMap;
use vfs::VfsPath;
use std::{
collections::HashMap,
fs::{self, File},
io::Read,
path::{Path, PathBuf},
};
use uuid::Uuid;
@ -14,8 +18,8 @@ use nix::unistd::getpid;
use std::{cmp::max, io::Write};
impl Index {
pub fn load(repository_path: &VfsPath) -> Result<Self> {
if !repository_path.exists()? {
pub fn load(repository_path: &Path) -> Result<Self> {
if !repository_path.exists() {
let mut index = Index::new()?;
index.save(repository_path)?;
}
@ -26,19 +30,19 @@ impl Index {
log::debug!(
"[{}] loaded index from {}, version: {}; {} items",
getpid(),
index_file_path.as_str(),
index_file_path.to_string_lossy(),
index.version,
index.newest_items_by_source_path.len()
);
Ok(index)
}
pub fn save(&mut self, repository_path: &VfsPath) -> Result<()> {
pub fn save(&mut self, repository_path: &Path) -> Result<()> {
let lock_id = Uuid::new_v4();
let lock = Lock::lock(repository_path)?;
let index_file_path = &Index::index_file_path_for_repository_path(repository_path)?;
if index_file_path.exists()? {
if index_file_path.exists() {
let index = Index::load_from_file(&Index::index_file_path_for_repository_path(repository_path)?)?;
self.merge_items_by_file_id(index.items_by_file_id);
self.merge_newest_items(index.newest_items_by_source_path);
@ -52,20 +56,21 @@ impl Index {
getpid(),
self.version,
lock_id,
index_file_path.as_str(),
index_file_path.to_string_lossy(),
self.newest_items_by_source_path.len()
);
Ok(())
}
fn write_index_to_file(&mut self, index_file_path: &VfsPath) -> Result<()> {
fn write_index_to_file(&mut self, index_file_path: &Path) -> Result<()> {
let parent = index_file_path.parent();
match parent {
None => Err(anyhow!(format!("cannot get parent for {}", index_file_path.as_str()))),
Some(parent) => Ok(parent
.create_dir_all()
.context(format!("create index directory at {}", index_file_path.as_str()))?),
}?;
None => Err(anyhow!(format!(
"cannot get parent for {}",
index_file_path.to_string_lossy()
))),
Some(parent) => Ok(fs::create_dir_all(parent)),
}??;
let serialised = serde_json::to_string(&self)?;
@ -73,13 +78,13 @@ impl Index {
let encoded = error_correcting_encoder::encode(bytes)?;
{
let mut file = index_file_path.create_file()?;
let mut file = File::create(index_file_path)?;
file.write_all(&encoded).context("writing index to disk")?;
file.flush()?;
}
let readback = {
let mut file = index_file_path.open_file()?;
let mut file = File::open(index_file_path)?;
let mut readback = vec![];
file.read_to_end(&mut readback)?;
readback
@ -92,16 +97,16 @@ impl Index {
}
}
fn load_from_file(index_file_path: &VfsPath) -> Result<Self> {
let mut file = index_file_path.open_file()?;
fn load_from_file(index_file_path: &Path) -> Result<Self> {
let mut file = File::open(index_file_path)?;
let mut encoded = vec![];
file.read_to_end(&mut encoded)?;
let decoded = error_correcting_encoder::decode(&encoded)?;
let index_text = String::from_utf8(decoded)?;
let index: Index =
serde_json::from_str(&index_text).context(format!("cannot read index from: {}", index_file_path.as_str()))?;
let index: Index = serde_json::from_str(&index_text)
.context(format!("cannot read index from: {}", index_file_path.to_string_lossy()))?;
Ok(index)
}
@ -121,8 +126,8 @@ impl Index {
self.items_by_file_id.extend(old_items_by_file_id);
}
fn index_file_path_for_repository_path(path: &VfsPath) -> Result<VfsPath> {
Ok(path.join("index")?)
fn index_file_path_for_repository_path(path: &Path) -> Result<PathBuf> {
Ok(path.join("index"))
}
}
@ -130,16 +135,16 @@ impl Index {
mod must {
use crate::index::Index;
use anyhow::Result;
use vfs::{MemoryFS, VfsPath};
use pretty_assertions::assert_eq;
use tempfile::tempdir;
#[test]
fn have_version_increased_when_saved() -> Result<()> {
let temp_dir: VfsPath = MemoryFS::new().into();
let temp_dir = tempdir()?;
let mut index = Index::new()?;
let old_version = index.version;
index.save(&temp_dir)?;
index.save(&temp_dir.path())?;
let new_version = index.version;
@ -150,11 +155,11 @@ mod must {
#[test]
fn be_same_when_loaded_from_disk() -> Result<()> {
let repository_path: VfsPath = MemoryFS::new().into();
let repository_path = tempdir()?;
let mut original = Index::new()?;
original.save(&repository_path)?;
let loaded = Index::load(&repository_path)?;
original.save(&repository_path.path())?;
let loaded = Index::load(&repository_path.path())?;
assert_eq!(original, loaded);

View file

@ -1,26 +1,31 @@
use anyhow::Result;
use anyhow::*;
use fail::fail_point;
use std::{io::Write, time::Instant};
use std::{
fs::{remove_file, File},
io::Write,
path::{Path, PathBuf},
time::Instant,
};
use uuid::Uuid;
use vfs::VfsPath;
use walkdir::WalkDir;
use rand::{rngs::OsRng, RngCore};
use std::{thread, time};
pub struct Lock {
path: VfsPath,
path: PathBuf,
}
const MAX_TIMEOUT_MILLIS: u16 = 8192;
const FILE_EXTENSION: &str = ".lock";
impl Lock {
pub fn lock(index_directory: &VfsPath) -> Result<Self> {
pub fn lock(index_directory: &Path) -> Result<Self> {
Lock::lock_with_timeout(index_directory, MAX_TIMEOUT_MILLIS)
}
pub fn lock_with_timeout(index_directory: &VfsPath, max_timeout_millis: u16) -> Result<Self> {
pub fn lock_with_timeout(index_directory: &Path, max_timeout_millis: u16) -> Result<Self> {
let mut buffer = [0u8; 16];
OsRng.fill_bytes(&mut buffer);
let id = Uuid::from_bytes(buffer);
@ -35,23 +40,26 @@ impl Lock {
}
fn delete_lock_file(&self) -> Result<()> {
if self.path.exists()? {
self.path.remove_file()?;
if self.path.exists() {
remove_file(&self.path)?;
}
Ok(())
}
fn wait_to_have_sole_lock(lock_id: Uuid, index_directory: &VfsPath, max_timeout_millis: u16) -> Result<()> {
fn wait_to_have_sole_lock(lock_id: Uuid, index_directory: &Path, max_timeout_millis: u16) -> Result<()> {
let start_time = Instant::now();
let _ = Lock::create_lock_file(lock_id, index_directory);
while !Lock::sole_lock(lock_id, index_directory)? {
let path = Lock::lock_file_path(index_directory, lock_id)?;
if path.exists()? {
path.remove_file()?;
if path.exists() {
remove_file(path)?;
}
let sleep_duration = time::Duration::from_millis((OsRng.next_u32() % 64).into());
thread::sleep(sleep_duration);
// timeout will take care of permanent errors
let _ = Lock::create_lock_file(lock_id, index_directory);
if start_time.elapsed().as_millis() > max_timeout_millis.into() {
return Err(anyhow!("timed out waiting on lock"));
}
@ -59,42 +67,43 @@ impl Lock {
Ok(())
}
fn sole_lock(lock_id: Uuid, index_directory: &VfsPath) -> Result<bool> {
fn sole_lock(lock_id: Uuid, index_directory: &Path) -> Result<bool> {
let my_lock_file_path = Lock::lock_file_path(index_directory, lock_id)?;
let locks = Lock::all_locks(index_directory)?;
let mut only_mine = true;
for path in &locks {
if path != &my_lock_file_path {
only_mine = false;
break;
}
}
if locks.is_empty() {
let walker = WalkDir::new(index_directory);
let all_locks: Vec<_> = walker
.into_iter()
.filter_map(|e| e.ok())
.filter(|e| e.file_name().to_string_lossy().ends_with(FILE_EXTENSION))
.collect();
if all_locks.len() != 1 {
return Ok(false);
}
Ok(only_mine)
}
fn all_locks(index_directory: &VfsPath) -> Result<Vec<VfsPath>> {
Ok(index_directory
.read_dir()?
let walker = WalkDir::new(index_directory);
let my_locks: Vec<_> = walker
.into_iter()
.filter(|f| f.filename().ends_with(FILE_EXTENSION))
.collect())
.filter_map(|e| e.ok())
.filter(|e| e.path() == my_lock_file_path)
.collect();
if my_locks.len() != 1 {
return Ok(false);
}
let result = all_locks.first().unwrap().path() == my_locks.first().unwrap().path();
Ok(result)
}
fn create_lock_file(lock_id: Uuid, index_directory: &VfsPath) -> Result<()> {
fn create_lock_file(lock_id: Uuid, index_directory: &Path) -> Result<()> {
let lock_file_path = Lock::lock_file_path(index_directory, lock_id)?;
fail_point!("create-lock-file", |e: Option<String>| Err(anyhow!(e.unwrap())));
let mut file = lock_file_path.create_file()?;
let mut file = File::create(lock_file_path)?;
let lock_id_text = lock_id.to_hyphenated().to_string();
let lock_id_bytes = lock_id_text.as_bytes();
Ok(file.write_all(lock_id_bytes)?)
}
fn lock_file_path(path: &VfsPath, lock_id: Uuid) -> Result<VfsPath> {
let file_name = format!("{}.{}", lock_id, FILE_EXTENSION);
Ok(path.join(&file_name)?)
fn lock_file_path(path: &Path, lock_id: Uuid) -> Result<PathBuf> {
let file_name = format!("{}{}", lock_id, FILE_EXTENSION);
Ok(path.join(&file_name))
}
}
@ -108,20 +117,22 @@ impl Drop for Lock {
mod must {
use super::Lock;
use anyhow::Result;
use pretty_assertions::assert_eq;
use tempfile::tempdir;
#[cfg(feature = "failpoints")]
use two_rusty_forks::rusty_fork_test;
use vfs::{MemoryFS, VfsPath};
#[test]
fn be_released_when_dropped() -> Result<()> {
let temp_dir: VfsPath = MemoryFS::new().into();
let temp_dir = tempdir()?;
let initial_number_of_entries = temp_dir.path().read_dir()?.count();
{
let _lock = Lock::lock(&temp_dir);
let _lock = Lock::lock(&temp_dir.path())?;
}
let entries = temp_dir.read_dir()?.count();
let entries = temp_dir.path().read_dir()?.count();
assert_eq!(entries, 0);
assert_eq!(entries, initial_number_of_entries);
Ok(())
}
@ -130,9 +141,9 @@ mod must {
#[test]
fn be_able_to_lock_when_creating_lock_file_fails_sometimes() {
fail::cfg("create-lock-file", "90%10*return(some lock file creation error)->off").unwrap();
let path = MemoryFS::new().into();
let temp_dir = tempdir().unwrap();
let lock = Lock::lock(&path).unwrap();
let lock = Lock::lock(&temp_dir.path()).unwrap();
lock.release().unwrap();
}
}
@ -142,9 +153,9 @@ mod must {
#[test]
fn know_to_give_up_when_creating_lock_file_always_fails() {
fail::cfg("create-lock-file", "return(persistent lock file creation error)").unwrap();
let path = MemoryFS::new().into();
let temp_dir = tempdir().unwrap();
assert!(Lock::lock_with_timeout(&path, 1).is_err());
assert!(Lock::lock_with_timeout(&temp_dir.path(), 1).is_err());
}
}
}

View file

@ -1,8 +1,7 @@
use std::collections::hash_map::Iter;
use std::collections::HashMap;
use std::{collections::hash_map::Iter, path::Path};
use serde::{Deserialize, Serialize};
use vfs::VfsPath;
use crate::index::item::IndexItem;
use crate::repository::ItemId;
@ -29,15 +28,15 @@ impl Index {
})
}
pub fn remember(&mut self, original_source_path: &VfsPath, relative_path: &str, id: ItemId) {
pub fn remember(&mut self, original_source_path: &Path, relative_path: &str, id: ItemId) {
let item = if let Some(old) = self
.newest_items_by_source_path
.get(&original_source_path.as_str().to_string())
.get(&original_source_path.to_string_lossy().to_string())
{
old.next_version(id, relative_path.to_string())
} else {
IndexItem::from(
original_source_path.as_str().to_string(),
original_source_path.to_string_lossy().to_string(),
relative_path.to_string(),
id,
Version::default(),
@ -46,11 +45,14 @@ impl Index {
self.items_by_file_id.insert(item.id(), item.clone());
self.newest_items_by_source_path
.insert(original_source_path.as_str().to_string(), item);
.insert(original_source_path.to_string_lossy().to_string(), item);
}
pub fn newest_item_by_source_path(&self, path: &VfsPath) -> Result<Option<IndexItem>> {
Ok(self.newest_items_by_source_path.get(&path.as_str().to_string()).cloned())
pub fn newest_item_by_source_path(&self, path: &Path) -> Result<Option<IndexItem>> {
Ok(self
.newest_items_by_source_path
.get(&path.to_string_lossy().to_string())
.cloned())
}
pub fn item_by_id(&self, id: &ItemId) -> Result<Option<IndexItem>> {

View file

@ -2,15 +2,17 @@ use crate::{repository::ItemId, version::Version};
use anyhow::Result;
use anyhow::*;
use nix::unistd::getpid;
use std::fmt;
use std::fmt::{Display, Formatter};
use std::path::Path;
use vfs::VfsPath;
use std::{fmt, path::PathBuf};
use std::{
fmt::{Display, Formatter},
fs,
};
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct RepositoryItem {
relative_path: String,
absolute_path: VfsPath,
absolute_path: PathBuf,
original_source_path: String,
id: ItemId,
version: Version,
@ -23,39 +25,32 @@ impl PartialOrd for RepositoryItem {
}
impl RepositoryItem {
pub fn from(
original_source_path: &str,
absolute_path: &VfsPath,
relative_path: &str,
id: ItemId,
version: Version,
) -> Self {
pub fn from(original_source_path: &str, absolute_path: &Path, relative_path: &str, id: ItemId, version: Version) -> Self {
RepositoryItem {
relative_path: relative_path.to_string(),
absolute_path: absolute_path.clone(),
absolute_path: absolute_path.to_path_buf(),
original_source_path: original_source_path.to_string(),
id,
version,
}
}
pub fn save(&self, save_to: &VfsPath) -> Result<()> {
pub fn save(&self, save_to: &Path) -> Result<()> {
let original_source_path = Path::new(self.original_source_path());
let source_path_relative = original_source_path.strip_prefix("/")?;
let source_path_relative = source_path_relative.to_string_lossy();
let target_path = save_to.join(&source_path_relative)?;
let target_path = save_to.join(&source_path_relative);
let parent = target_path
.parent()
.ok_or_else(|| anyhow!("cannot compute parent path for {}", &target_path.as_str()))?;
log::debug!("[{}] saving data to {}", getpid(), target_path.as_str());
parent.create_dir_all()?;
if !self.absolute_path.exists()? {
.ok_or_else(|| anyhow!("cannot compute parent path for {}", &target_path.to_string_lossy()))?;
log::debug!("[{}] saving data to {}", getpid(), target_path.to_string_lossy());
fs::create_dir_all(parent)?;
if !self.absolute_path.exists() {
return Err(anyhow!("corrupted repository"));
}
self.absolute_path.copy_file(&target_path)?;
fs::copy(&self.absolute_path, &target_path)?;
log::debug!("[{}] saved data to {}", getpid(), target_path.as_str());
log::debug!("[{}] saved data to {}", getpid(), &target_path.to_string_lossy());
Ok(())
}

View file

@ -1,9 +1,12 @@
pub mod item;
use std::fmt::{Debug, Formatter};
use std::io::BufReader;
use std::path::Path;
use std::{fmt, io};
use std::{
fmt::{Debug, Formatter},
path::PathBuf,
};
use std::{fs, path::Path};
use std::{fs::File, io::BufReader};
use crate::index::{Index, IndexItemIterator};
use anyhow::Result;
@ -12,7 +15,7 @@ use item::RepositoryItem;
use serde::{Deserialize, Serialize};
use sha2::Digest;
use sha2::Sha512;
use vfs::{VfsFileType, VfsPath};
use walkdir::WalkDir;
/// represents a place where backup is stored and can be restored from.
/// right now only on-disk directory storage is supported
@ -21,7 +24,7 @@ use vfs::{VfsFileType, VfsPath};
#[derive(Debug)]
pub struct Repository {
/// path to where the repository is stored on disk
path: VfsPath,
path: PathBuf,
index: Index,
}
@ -94,26 +97,26 @@ impl Debug for ItemId {
}
impl<'a> Repository {
pub fn init(path: &VfsPath) -> Result<Repository> {
path.create_dir_all()?;
pub fn init(path: &Path) -> Result<Repository> {
fs::create_dir_all(path)?;
let mut index = Index::new()?;
index.save(path)?;
let repository = Repository::open(path)?;
repository.data_dir()?.create_dir_all()?;
fs::create_dir_all(repository.data_dir()?)?;
Ok(repository)
}
pub fn open(path: &VfsPath) -> Result<Repository> {
pub fn open(path: &Path) -> Result<Repository> {
let index = Index::load(path)?;
let repository = Repository {
path: path.clone(),
path: path.to_path_buf(),
index,
};
Ok(repository)
}
pub fn path(&self) -> &VfsPath {
pub fn path(&self) -> &Path {
&self.path
}
@ -121,28 +124,27 @@ impl<'a> Repository {
self.index.save(&self.path)
}
pub fn store(&mut self, source_path: &VfsPath) -> Result<()> {
pub fn store(&mut self, source_path: &Path) -> Result<()> {
let id = Repository::calculate_id(source_path)?;
let destination = self.data_dir()?;
let destination = destination.join(&id.to_string())?;
let destination = destination.join(&id.to_string());
if source_path.metadata()?.file_type != VfsFileType::File {
if !source_path.metadata()?.is_file() {
return Ok(());
}
let parent = destination
.parent()
.ok_or_else(|| anyhow!("cannot compute parent path for {}", &destination.as_str()))?;
parent.create_dir_all()?;
if !destination.exists()? {
source_path.copy_file(&destination)?;
.ok_or_else(|| anyhow!("cannot compute parent path for {}", &destination.to_string_lossy()))?;
fs::create_dir_all(parent)?;
if !destination.exists() {
fs::copy(&source_path, &destination)?;
}
let destination_path = Path::new(destination.as_str());
let relative_path = destination_path.strip_prefix(&self.path.as_str())?.to_string_lossy();
self.index.remember(source_path, &relative_path, id);
let relative_path = destination.strip_prefix(&self.path())?;
self.index.remember(source_path, &relative_path.to_string_lossy(), id);
Ok(())
}
pub fn newest_item_by_source_path(&self, path: &VfsPath) -> Result<Option<RepositoryItem>> {
pub fn newest_item_by_source_path(&self, path: &Path) -> Result<Option<RepositoryItem>> {
let item = self.index.newest_item_by_source_path(path)?;
match item {
None => Ok(None),
@ -170,7 +172,7 @@ impl<'a> Repository {
let relative_path = index_item.relative_path();
let repository_path = self.path();
let original_source_path = index_item.original_source_path();
let absolute_path = repository_path.join(relative_path)?;
let absolute_path = repository_path.join(relative_path);
Ok(RepositoryItem::from(
&original_source_path,
&absolute_path,
@ -181,21 +183,22 @@ impl<'a> Repository {
}
pub fn data_weight(&self) -> Result<u64> {
let walkdir = self.data_dir()?.walk_dir()?;
let total_size = walkdir
.filter_map(|entry| entry.ok())
.filter_map(|entry| entry.metadata().ok())
.filter(|metadata| metadata.file_type == VfsFileType::File)
.fold(0, |acc, m| acc + m.len);
let walker = WalkDir::new(self.data_dir()?);
let total_size = walker
.into_iter()
.filter_map(|e| e.ok())
.filter_map(|e| e.metadata().ok())
.filter(|m| m.is_file())
.fold(0, |acc, m| acc + m.len());
Ok(total_size)
}
fn data_dir(&self) -> Result<VfsPath> {
Ok(self.path().join(DATA_DIR_NAME)?)
fn data_dir(&self) -> Result<PathBuf> {
Ok(self.path().join(DATA_DIR_NAME))
}
fn calculate_id(source_path: &VfsPath) -> Result<ItemId> {
let source_file = source_path.open_file()?;
fn calculate_id(source_path: &Path) -> Result<ItemId> {
let source_file = File::open(source_path)?;
let mut reader = BufReader::new(source_file);
let mut hasher = Sha512::new();
@ -210,17 +213,17 @@ mod must {
use super::Repository;
use crate::test::source::TestSource;
use anyhow::Result;
use vfs::MemoryFS;
use tempfile::tempdir;
#[test]
fn have_size_equal_to_sum_of_sizes_of_backed_up_files() -> Result<()> {
let file_size1 = 13;
let file_size2 = 27;
let source = TestSource::new()?;
let repository_path = MemoryFS::new().into();
Repository::init(&repository_path)?;
let repository_path = tempdir()?;
Repository::init(&repository_path.path())?;
let mut backup_repository = Repository::open(&repository_path)?;
let mut backup_repository = Repository::open(&repository_path.path())?;
source.write_random_bytes_to_file("file1", file_size1)?;
backup_repository.store(&source.file_path("file1")?)?;

View file

@ -1,14 +1,15 @@
use std::path::Path;
use crate::repository::{item::RepositoryItem, Repository};
use anyhow::Result;
use vfs::VfsPath;
pub struct Engine<'a> {
repository: &'a mut Repository,
target_path: &'a VfsPath,
target_path: &'a Path,
}
impl<'a> Engine<'a> {
pub fn new(repository: &'a mut Repository, target_path: &'a VfsPath) -> Result<Self> {
pub fn new(repository: &'a mut Repository, target_path: &'a Path) -> Result<Self> {
Ok(Engine { repository, target_path })
}

View file

@ -1,5 +1,9 @@
pub mod in_memory {
use std::path::Path;
use std::{
fs::File,
io::Read,
path::{Path, PathBuf},
};
use crate::{
backup,
@ -8,73 +12,60 @@ pub mod in_memory {
test::source::TestSource,
};
use anyhow::Result;
use vfs::{MemoryFS, VfsFileType, VfsPath};
use rand::Rng;
use tempfile::tempdir;
use walkdir::WalkDir;
pub fn random_in_memory_path(prefix: &str) -> Result<VfsPath> {
let path: VfsPath = MemoryFS::new().into();
let path = path.join(&format!("{}-{}", prefix, rand::thread_rng().gen::<u64>()))?;
Ok(path)
}
pub fn assert_same_after_restore(source_path: &Path) -> Result<()> {
let repository_path = tempdir()?;
let restore_target = tempdir()?;
pub fn assert_same_after_restore(source_path: &VfsPath) -> Result<()> {
let repository_path: VfsPath = random_in_memory_path("repository")?;
let restore_target: VfsPath = random_in_memory_path("target")?;
assert_ne!(source_path, &repository_path);
assert_ne!(repository_path, restore_target);
Repository::init(&repository_path)?;
Repository::init(&repository_path.path())?;
{
let mut backup_repository = Repository::open(&repository_path)?;
let mut backup_repository = Repository::open(&repository_path.path())?;
let mut backup_engine = backup::Engine::new(source_path, &mut backup_repository)?;
backup_engine.backup()?;
}
{
let mut restore_repository = Repository::open(&repository_path)?;
let mut restore_repository = Repository::open(&repository_path.path())?;
let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?;
restore_engine.restore_all()?;
}
assert_directory_trees_have_same_contents(source_path, &restore_target)?;
assert_directory_trees_have_same_contents(source_path, &restore_target.path())?;
Ok(())
}
pub fn assert_restored_file_contents(
repository_path: &VfsPath,
source_file_full_path: &VfsPath,
contents: &[u8],
) -> Result<()> {
pub fn assert_restored_file_contents(repository_path: &Path, source_file_full_path: &Path, contents: &[u8]) -> Result<()> {
let mut restore_repository = Repository::open(repository_path)?;
let item = restore_repository.newest_item_by_source_path(&source_file_full_path)?;
let restore_target = random_in_memory_path("target")?;
let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
let restore_target = tempdir()?;
let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?;
restore_engine.restore(&item.unwrap())?;
let source_file_relative_path = Path::new(source_file_full_path.as_str()).strip_prefix("/")?;
let restored_file_path = restore_target.join(&source_file_relative_path.to_string_lossy())?;
let source_file_relative_path = Path::new(source_file_full_path).strip_prefix("/")?;
let restored_file_path = restore_target.path().join(&source_file_relative_path);
assert_target_file_contents(&restored_file_path, contents)
}
pub fn assert_restored_from_version_has_contents(
repository_path: &VfsPath,
source_file_full_path: &VfsPath,
repository_path: &Path,
source_file_full_path: &Path,
old_contents: &[u8],
old_id: &ItemId,
) -> Result<()> {
let mut restore_repository = Repository::open(repository_path)?;
let old_item = restore_repository.item_by_id(&old_id)?;
let restore_target = random_in_memory_path("target")?;
let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
let restore_target = tempdir()?;
let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?;
restore_engine.restore(&old_item.unwrap())?;
let source_file_relative_path = Path::new(source_file_full_path.as_str()).strip_prefix("/")?;
let restored_file_path = restore_target.join(&source_file_relative_path.to_string_lossy())?;
let source_file_relative_path = Path::new(source_file_full_path).strip_prefix("/")?;
let restored_file_path = restore_target.path().join(&source_file_relative_path);
assert_target_file_contents(&restored_file_path, old_contents)
}
pub fn newest_item(repository_path: &VfsPath, source_file_full_path: &VfsPath) -> Result<RepositoryItem> {
pub fn newest_item(repository_path: &Path, source_file_full_path: &Path) -> Result<RepositoryItem> {
let item = {
let reading_repository = Repository::open(repository_path)?;
let item = reading_repository.newest_item_by_source_path(&source_file_full_path)?;
@ -84,7 +75,7 @@ pub mod in_memory {
Ok(item)
}
pub fn restore_all_from_reloaded_repository(repository_path: &VfsPath, restore_target: &VfsPath) -> Result<()> {
pub fn restore_all_from_reloaded_repository(repository_path: &Path, restore_target: &Path) -> Result<()> {
{
let mut restore_repository = Repository::open(repository_path)?;
let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
@ -95,7 +86,7 @@ pub mod in_memory {
pub fn backup_file_with_text_contents(
source: &TestSource,
repository_path: &VfsPath,
repository_path: &Path,
source_file_relative_path: &str,
contents: &str,
) -> Result<()> {
@ -106,7 +97,7 @@ pub mod in_memory {
pub fn backup_file_with_byte_contents(
source: &TestSource,
repository_path: &VfsPath,
repository_path: &Path,
source_file_relative_path: &str,
contents: &[u8],
) -> Result<()> {
@ -120,22 +111,22 @@ pub mod in_memory {
}
}
pub fn data_weight(repository_path: &VfsPath) -> Result<u64> {
pub fn data_weight(repository_path: &Path) -> Result<u64> {
{
let repository = Repository::open(repository_path)?;
Ok(repository.data_weight()?)
}
}
fn assert_directory_trees_have_same_contents(left: &VfsPath, right: &VfsPath) -> Result<()> {
fn assert_directory_trees_have_same_contents(left: &Path, right: &Path) -> Result<()> {
let left_files = get_sorted_files_recursively(left)?;
let right_files = get_sorted_files_recursively(right)?;
let pairs = left_files.iter().zip(right_files);
for (l, r) in pairs {
assert_eq!(l.filename(), r.filename());
let mut fl = l.open_file()?;
let mut fr = r.open_file()?;
assert_eq!(l.file_name(), r.file_name());
let mut fl = File::open(l)?;
let mut fr = File::open(r)?;
let mut bl = vec![];
let mut br = vec![];
fl.read_to_end(&mut bl).unwrap();
@ -145,35 +136,32 @@ pub mod in_memory {
Ok(())
}
pub fn get_sorted_files_recursively(path: &VfsPath) -> Result<Vec<VfsPath>> {
pub fn get_sorted_files_recursively(path: &Path) -> Result<Vec<PathBuf>> {
assert!(
path.exists()?,
path.exists(),
"[get_sorted_files_recursively] invoked on a path that does not exist: {:?}",
path
);
let walker = path.walk_dir()?;
let mut result = vec![];
for maybe_entry in walker {
let entry = &maybe_entry?;
if entry == path {
continue;
}
if entry.metadata()?.file_type == VfsFileType::File {
result.push(entry.clone());
}
}
result.sort_by_key(|a| a.filename());
let walker = WalkDir::new(path);
let result = walker
.sort_by_file_name()
.into_iter()
.filter_map(|e| e.ok())
.filter(|e| e.metadata().map_or(false, |m| m.is_file()))
.map(|e| e.path().to_path_buf())
.collect::<Vec<_>>();
Ok(result)
}
fn assert_target_file_contents(restored_path: &VfsPath, expected_contents: &[u8]) -> Result<()> {
fn assert_target_file_contents(restored_path: &Path, expected_contents: &[u8]) -> Result<()> {
let mut actual_contents = vec![];
assert!(restored_path.exists()?, "expected '{}' to be there", restored_path.as_str());
restored_path.open_file()?.read_to_end(&mut actual_contents)?;
assert!(
restored_path.exists(),
"expected '{}' to be there",
restored_path.to_string_lossy()
);
let mut file = File::open(restored_path)?;
file.read_to_end(&mut actual_contents)?;
assert_eq!(expected_contents, actual_contents);
Ok(())
}

View file

@ -1,24 +1,25 @@
use std::io::Write;
use std::{
fs::{self, File},
io::Write,
path::{Path, PathBuf},
};
use anyhow::Result;
use vfs::VfsPath;
use super::assertions::in_memory::random_in_memory_path;
use tempfile::{tempdir, TempDir};
pub struct TestSource {
directory: VfsPath,
directory: TempDir,
}
impl TestSource {
pub fn new() -> Result<Self> {
let path: VfsPath = random_in_memory_path("testsource")?;
path.create_dir_all()?;
Ok(Self { directory: path })
let dir = tempdir()?;
Ok(Self { directory: dir })
}
pub fn write_bytes_to_file(&self, filename: &str, bytes: &[u8]) -> Result<()> {
let path = self.file_path(filename)?;
let mut file = path.create_file()?;
let mut file = File::create(path)?;
file.write_all(bytes)?;
Ok(())
}
@ -33,19 +34,19 @@ impl TestSource {
Ok(())
}
pub fn path(&self) -> &VfsPath {
&self.directory
pub fn path(&self) -> &Path {
&self.directory.path()
}
pub fn file_path(&self, filename: &str) -> Result<VfsPath> {
let file_path = self.directory.join(filename)?;
pub fn file_path(&self, filename: &str) -> Result<PathBuf> {
let file_path = self.directory.path().join(filename);
Ok(file_path)
}
}
impl Drop for TestSource {
fn drop(&mut self) {
let _ = self.path().remove_dir_all();
let _ = fs::remove_dir_all(self.path());
}
}
#[cfg(test)]
@ -59,10 +60,10 @@ mod must {
{
let source = TestSource::new()?;
source.write_random_bytes_to_file("somefile", 1)?;
path = source.path().clone();
path = source.path().to_path_buf();
}
assert!(!path.exists()?);
assert!(!path.exists());
Ok(())
}
}

View file

@ -1,5 +1,10 @@
#[cfg(test)]
mod must {
use std::{
fs,
path::{Path, PathBuf},
};
use anyhow::Result;
use bakare::test::source::TestSource;
use bakare::{backup, restore};
@ -9,15 +14,15 @@ mod must {
sys::wait::{waitpid, WaitStatus},
unistd::getpid,
};
use vfs::{PhysicalFS, VfsPath};
use tempfile::tempdir;
#[test]
fn handle_concurrent_backups() -> Result<()> {
setup_logger();
let repository_directory = tempfile::tempdir()?.into_path();
let repository_path: VfsPath = PhysicalFS::new(repository_directory).into();
let repository_path = repository_path.join(&format!("repository-{}", getpid()))?;
let dir = tempdir()?;
let repository_path = dir.path();
let repository_path = repository_path.join(&format!("repository-{}", getpid()));
Repository::init(&repository_path)?;
let parallel_backups_number = 16;
@ -28,8 +33,8 @@ mod must {
assert_eq!(finished_backup_runs.len(), parallel_backups_number);
assert!(data_weight(&repository_path)? > 0);
let target_path: VfsPath = random_in_memory_path("target")?;
let all_restored_files = restore_all(&repository_path, &target_path)?;
let target_path = tempdir()?;
let all_restored_files = restore_all(&repository_path, &target_path.path())?;
assert_eq!(all_restored_files.len(), total_number_of_files);
assert_all_files_in_place(parallel_backups_number, files_per_backup_number, &all_restored_files)?;
@@ -39,14 +44,16 @@ mod must {
fn assert_all_files_in_place(
parallel_backups_number: usize,
files_per_backup_number: usize,
all_restored_files: &[VfsPath],
all_restored_files: &[PathBuf],
) -> Result<()> {
for i in 0..parallel_backups_number {
for j in 0..files_per_backup_number {
let id = file_id(i, j);
let file = all_restored_files.iter().find(|f| f.filename() == id);
assert!(file.unwrap().exists()?, "file {:?} does not exist", file);
let contents = file.unwrap().read_to_string()?;
let file = all_restored_files
.iter()
.find(|f| f.file_name().unwrap().to_string_lossy() == id);
assert!(file.unwrap().exists(), "file {:?} does not exist", file);
let contents = fs::read_to_string(file.unwrap())?;
assert_eq!(id.to_string(), contents.to_owned());
}
}
@@ -54,7 +61,7 @@ mod must {
}
fn backup_in_parallel(
repository_path: &VfsPath,
repository_path: &Path,
parallel_backups_number: usize,
files_per_backup_number: usize,
) -> Result<Vec<usize>> {
@@ -86,7 +93,7 @@ mod must {
Ok(task_numbers)
}
fn backup_process(task_number: usize, repository_path: &VfsPath, files_per_backup_number: usize) -> Result<()> {
fn backup_process(task_number: usize, repository_path: &Path, files_per_backup_number: usize) -> Result<()> {
let mut repository = Repository::open(repository_path)?;
let source = TestSource::new().unwrap();
let mut backup_engine = backup::Engine::new(source.path(), &mut repository)?;
@@ -98,7 +105,7 @@ mod must {
Ok(())
}
fn restore_all(repository_path: &VfsPath, restore_target: &VfsPath) -> Result<Vec<VfsPath>> {
fn restore_all(repository_path: &Path, restore_target: &Path) -> Result<Vec<PathBuf>> {
let mut restore_repository = Repository::open(repository_path)?;
let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
restore_engine.restore_all()?;

View file

@@ -3,12 +3,14 @@ mod must {
use bakare::test::assertions::in_memory::*;
use bakare::{repository::Repository, test::source::TestSource};
use proptest::prelude::*;
use tempfile::tempdir;
proptest! {
#[test]
fn store_duplicated_files_just_once(contents in any::<[u8;3]>()) {
let source = TestSource::new().unwrap();
let repository_path = random_in_memory_path("repository").unwrap();
let dir = tempdir().unwrap();
let repository_path = dir.path();
Repository::init(&repository_path).unwrap();
assert_eq!(data_weight(&repository_path).unwrap(), 0);

View file

@@ -4,6 +4,7 @@ mod must {
use bakare::backup;
use bakare::test::assertions::in_memory::*;
use bakare::{repository::Repository, test::source::TestSource};
use tempfile::tempdir;
#[test]
fn restore_multiple_files() -> Result<()> {
@@ -19,8 +20,9 @@ mod must {
#[test]
fn restore_files_after_reopening_repository() -> Result<()> {
let source = TestSource::new()?;
let repository_path = random_in_memory_path("repository")?;
let restore_target = random_in_memory_path("target")?;
let dir = tempdir()?;
let repository_path = dir.path();
let restore_target = tempdir()?;
Repository::init(&repository_path)?;
@@ -29,7 +31,7 @@ mod must {
backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, original_contents)?;
restore_all_from_reloaded_repository(&repository_path, &restore_target)?;
restore_all_from_reloaded_repository(&repository_path, &restore_target.path())?;
let source_file_full_path = &source.file_path(source_file_relative_path)?;
assert_restored_file_contents(&repository_path, source_file_full_path, original_contents.as_bytes())
@@ -38,7 +40,8 @@ mod must {
#[test]
fn restore_older_version_of_file() -> Result<()> {
let source = TestSource::new().unwrap();
let repository_path = random_in_memory_path("repository")?;
let dir = tempdir()?;
let repository_path = dir.path();
Repository::init(&repository_path)?;
let source_file_relative_path = "some path";
@@ -59,7 +62,8 @@ mod must {
#[test]
fn newer_version_should_be_greater_than_earlier_version() -> Result<()> {
let source = TestSource::new().unwrap();
let repository_path = random_in_memory_path("repository")?;
let dir = tempdir()?;
let repository_path = dir.path();
Repository::init(&repository_path)?;
let source_file_relative_path = "some path";
@@ -83,7 +87,8 @@ mod must {
#[test]
fn restore_latest_version_by_default() -> Result<()> {
let source = TestSource::new().unwrap();
let repository_path = random_in_memory_path("repository")?;
let dir = tempdir()?;
let repository_path = dir.path();
Repository::init(&repository_path)?;
let source_file_relative_path = "some path";
@@ -97,7 +102,8 @@ mod must {
#[test]
fn forbid_backup_of_paths_within_repository() -> Result<()> {
let repository_path = random_in_memory_path("repository")?;
let dir = tempdir()?;
let repository_path = dir.path();
Repository::init(&repository_path)?;
let mut repository = Repository::open(&repository_path)?;