Migrate to VFS to make tests faster
parent 94ccf98a1c
commit d363227643

14 changed files with 515 additions and 486 deletions
@@ -25,11 +25,11 @@ sha2 = "0.9"
 tempfile = "3.1"
 thiserror = "1.0"
 uuid = { version = "0.8", features = ["v4"] }
+vfs = "0.4"
 walkdir = "2.3"

 [dev-dependencies]
 proptest = "0.10"
-vfs = "0.4"

 [dev-dependencies.cargo-husky]
 version = "1"
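Moving `vfs` out of dev-dependencies into the main dependency list is what lets production code be written against `VfsPath` while tests swap in a RAM-backed filesystem. A minimal sketch of that swap, assuming the vfs 0.4 API used throughout this commit:

```rust
use vfs::{MemoryFS, PhysicalFS, VfsPath};

fn main() -> anyhow::Result<()> {
    // Tests: a RAM-backed tree; no disk I/O, no tempdir cleanup.
    let test_root: VfsPath = MemoryFS::new().into();
    test_root.join("repository")?.create_dir_all()?;
    assert!(test_root.join("repository")?.exists());

    // Production: the same VfsPath-based code runs against the real disk.
    let real_root: VfsPath = PhysicalFS::new(std::path::PathBuf::from("/tmp")).into();
    assert!(real_root.exists());
    Ok(())
}
```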
@@ -1,32 +1,35 @@
+use crate::repository::Repository;
 use anyhow::Result;
 use anyhow::*;
-use std::path::Path;
+use vfs::VfsPath;
-use walkdir::WalkDir;
-
-use crate::repository::Repository;

 pub struct Engine<'a> {
-    source_path: &'a Path,
+    source_path: &'a VfsPath,
     repository: &'a mut Repository,
 }

 impl<'a> Engine<'a> {
-    pub fn new(source_path: &'a Path, repository: &'a mut Repository) -> Result<Self> {
-        if source_path.ancestors().any(|a| a == repository.path()) {
+    pub fn new(source_path: &'a VfsPath, repository: &'a mut Repository) -> Result<Self> {
+        let mut ancestors = vec![];
+        let mut current = Some(source_path.clone());
+        while let Some(path) = current {
+            ancestors.push(path.clone());
+            current = path.parent();
+        }
+        if ancestors.into_iter().any(|a| &a == repository.path()) {
             return Err(anyhow!("source same as repository"));
         }
         Ok(Engine { source_path, repository })
     }

     pub fn backup(&mut self) -> Result<()> {
-        let walker = WalkDir::new(self.source_path);
+        let walker = self.source_path.walk_dir()?;
         let save_every = 16;
         let mut save_counter = 0;
         for maybe_entry in walker {
             let entry = maybe_entry?;
-            if entry.path() != self.source_path {
-                self.repository.store(entry.path())?;
+            if &entry != self.source_path {
+                self.repository.store(&entry)?;
             }
             save_counter += 1;
             if save_counter == save_every {
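The manual ancestor walk replaces `Path::ancestors()`, which `VfsPath` does not provide; the chain is built by following `parent()` until it returns `None` at the filesystem root. A standalone sketch of the same walk:

```rust
use vfs::{MemoryFS, VfsPath};

fn ancestors(path: &VfsPath) -> Vec<VfsPath> {
    let mut result = vec![];
    let mut current = Some(path.clone());
    while let Some(p) = current {
        result.push(p.clone());
        current = p.parent(); // None once the root is reached
    }
    result
}

fn main() -> anyhow::Result<()> {
    let root: VfsPath = MemoryFS::new().into();
    let deep = root.join("a")?.join("b")?;
    // "/a/b", "/a" and the root itself.
    assert_eq!(ancestors(&deep).len(), 3);
    Ok(())
}
```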
src/index/io.rs
@@ -1,7 +1,5 @@
-use atomicwrites::{AllowOverwrite, AtomicFile};
 use std::collections::HashMap;
-use std::fs;
+use vfs::VfsPath;
-use std::path::{Path, PathBuf};

 use uuid::Uuid;

@@ -15,69 +13,80 @@ use nix::unistd::getpid;
 use std::{cmp::max, io::Write};

 impl Index {
-    pub fn load<T: AsRef<Path>>(repository_path: T) -> Result<Self> {
-        let repository_path = repository_path.as_ref();
+    pub fn load(repository_path: &VfsPath) -> Result<Self> {
         if !repository_path.exists() {
-            let mut index = Index::new(repository_path);
-            index.save()?;
+            let mut index = Index::new()?;
+            index.save(repository_path)?;
         }
         let lock = Lock::lock(repository_path)?;
-        let index = Index::load_from_file(&Index::index_file_path_for_repository_path(repository_path))?;
+        let index_file_path = &Index::index_file_path_for_repository_path(repository_path)?;
+        let index = Index::load_from_file(index_file_path)?;
         lock.release()?;
         log::debug!(
-            "[{}] loaded index from {}, version: {}",
+            "[{}] loaded index from {}, version: {}; {} items",
             getpid(),
-            repository_path.to_string_lossy(),
-            index.version
+            index_file_path.as_str(),
+            index.version,
+            index.newest_items_by_source_path.len()
         );
         Ok(index)
     }

-    pub fn save(&mut self) -> Result<()> {
+    pub fn save(&mut self, repository_path: &VfsPath) -> Result<()> {
         let lock_id = Uuid::new_v4();
-        let lock = Lock::lock(&self.index_directory()?)?;
-        if self.index_file_path().exists() {
-            let index = Index::load_from_file(&Index::index_file_path_for_repository_path(&self.index_directory()?))?;
+        let lock = Lock::lock(repository_path)?;
+        let index_file_path = &Index::index_file_path_for_repository_path(repository_path)?;
+        if index_file_path.exists() {
+            let index = Index::load_from_file(&Index::index_file_path_for_repository_path(repository_path)?)?;
             self.merge_items_by_file_id(index.items_by_file_id);
             self.merge_newest_items(index.newest_items_by_source_path);
             self.version = max(self.version, index.version);
         }
         self.version = self.version.next();
-        self.write_index_to_file(&self.index_file_path())?;
+        self.write_index_to_file(index_file_path)?;
         lock.release()?;
-        log::debug!("[{}] saved index version {} with lock id {}", getpid(), self.version, lock_id,);
+        log::debug!(
+            "[{}] saved index version {} with lock id {} to {}; {} items",
+            getpid(),
+            self.version,
+            lock_id,
+            index_file_path.as_str(),
+            self.newest_items_by_source_path.len()
+        );
         Ok(())
     }

-    fn write_index_to_file(&mut self, path: &Path) -> Result<()> {
-        fs::create_dir_all(
-            path.parent()
-                .ok_or_else(|| anyhow!("cannot compute parent path for {}", path.to_string_lossy()))?,
-        )
-        .context("create index directory")?;
-
-        let file = AtomicFile::new(&path, AllowOverwrite);
-        file.write(|f| {
-            let contents = serde_json::to_string(&self)?;
-            f.write_all(contents.as_bytes())
-        })
-        .context("writing index to disk")?;
-        Ok(())
+    fn write_index_to_file(&mut self, index_file_path: &VfsPath) -> Result<()> {
+        let parent = index_file_path.parent();
+        match parent {
+            None => Err(anyhow!(format!("cannot get parent for {}", index_file_path.as_str()))),
+            Some(parent) => Ok(parent
+                .create_dir_all()
+                .context(format!("create index directory at {}", index_file_path.as_str()))?),
+        }?;
+
+        let contents;
+        {
+            let mut file = index_file_path.create_file()?;
+            contents = serde_json::to_string(&self)?;
+            file.write_all(contents.as_bytes()).context("writing index to disk")?;
+            file.flush()?;
+        }
+        let readback = index_file_path.read_to_string()?;
+        if readback != contents {
+            Err(anyhow!("index readback incorrect"))
+        } else {
+            Ok(())
+        }
     }

-    fn index_file_path(&self) -> PathBuf {
-        Path::new(&self.index_path).to_path_buf()
-    }
-
-    fn load_from_file<T: AsRef<Path>>(index_file_path: T) -> Result<Self> {
-        let path_text = format!("{}", index_file_path.as_ref().to_string_lossy());
-        let index_text =
-            fs::read_to_string(path_text.clone()).context(format!("reading index file contents from {}", path_text))?;
-        let mut index: Index = serde_json::from_str(&index_text).context(format!("cannot read index from: {}", index_text))?;
-        index.index_path = path_text;
+    fn load_from_file(index_file_path: &VfsPath) -> Result<Self> {
+        let index_text = index_file_path
+            .read_to_string()
+            .context(format!("reading index file contents from {}", index_file_path.as_str()))?;
+        let index: Index = serde_json::from_str(&index_text).context(format!("cannot read index from: {}", index_text))?;
         Ok(index)
     }

@@ -97,16 +106,8 @@ impl Index {
         self.items_by_file_id.extend(old_items_by_file_id);
     }

-    fn index_file_path_for_repository_path(path: &Path) -> PathBuf {
-        path.join("index")
-    }
-
-    fn index_directory(&self) -> Result<PathBuf> {
-        Ok(self
-            .index_file_path()
-            .parent()
-            .ok_or_else(|| anyhow!("cannot compute parent path for {}", self.index_file_path().to_string_lossy()))?
-            .to_path_buf())
-    }
+    fn index_file_path_for_repository_path(path: &VfsPath) -> Result<VfsPath> {
+        Ok(path.join("index")?)
+    }
 }

@@ -114,14 +115,15 @@ impl Index {
 mod must {
     use crate::index::Index;
     use anyhow::Result;
+    use vfs::{MemoryFS, VfsPath};

     #[test]
     fn have_version_increased_when_saved() -> Result<()> {
-        let temp_dir = tempfile::tempdir()?;
-        let mut index = Index::new(&temp_dir.into_path());
+        let temp_dir: VfsPath = MemoryFS::new().into();
+        let mut index = Index::new()?;
         let old_version = index.version;
-        index.save()?;
+        index.save(&temp_dir)?;
         let new_version = index.version;
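Dropping `atomicwrites` loses the write-to-temp-then-rename guarantee, which has no obvious `VfsPath` equivalent; the commit compensates by reading the index back and comparing it with what was written. A minimal sketch of that pattern, assuming the same vfs 0.4 calls:

```rust
use std::io::Write;
use vfs::{MemoryFS, VfsPath};

fn write_verified(path: &VfsPath, contents: &str) -> anyhow::Result<()> {
    {
        // Scope the writer so it is flushed and dropped before the readback.
        let mut file = path.create_file()?;
        file.write_all(contents.as_bytes())?;
        file.flush()?;
    }
    if path.read_to_string()? != contents {
        anyhow::bail!("index readback incorrect");
    }
    Ok(())
}

fn main() -> anyhow::Result<()> {
    let root: VfsPath = MemoryFS::new().into();
    write_verified(&root.join("index")?, r#"{"version":1}"#)
}
```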
@@ -50,8 +50,8 @@ impl IndexItem {
 impl From<RepositoryItem> for IndexItem {
     fn from(i: RepositoryItem) -> Self {
         IndexItem {
-            relative_path: i.relative_path().to_string_lossy().to_string(),
-            original_source_path: i.original_source_path().to_string_lossy().to_string(),
+            relative_path: i.relative_path().to_string(),
+            original_source_path: i.original_source_path().to_string(),
             id: i.id().clone(),
             version: *i.version(),
         }
@@ -1,26 +1,22 @@
 use anyhow::Result;
-use anyhow::*;
-use atomicwrites::{AtomicFile, DisallowOverwrite};
-use glob::{glob, Paths};
 use std::io::Write;
-use std::path::{Path, PathBuf};
 use uuid::Uuid;
+use vfs::VfsPath;

 use rand::{rngs::OsRng, RngCore};
 use std::{thread, time};

 pub struct Lock {
-    path: PathBuf,
+    path: VfsPath,
 }

 impl Lock {
-    pub fn lock<T: AsRef<Path>>(index_directory: T) -> Result<Self> {
-        let index_directory = index_directory.as_ref();
+    pub fn lock(index_directory: &VfsPath) -> Result<Self> {
         let mut buffer = [0u8; 16];
         OsRng.fill_bytes(&mut buffer);
         let id = Uuid::from_bytes(buffer);
         Lock::wait_to_have_sole_lock(id, index_directory)?;
-        let path = Lock::lock_file_path(index_directory, id);
+        let path = Lock::lock_file_path(index_directory, id)?;
         Ok(Lock { path })
     }

@@ -31,16 +27,16 @@ impl Lock {
     fn delete_lock_file(&self) -> Result<()> {
         if self.path.exists() {
-            std::fs::remove_file(&self.path)?;
+            self.path.remove_file()?;
         }
         Ok(())
     }

-    fn wait_to_have_sole_lock(lock_id: Uuid, index_directory: &Path) -> Result<()> {
+    fn wait_to_have_sole_lock(lock_id: Uuid, index_directory: &VfsPath) -> Result<()> {
         Lock::create_lock_file(lock_id, index_directory)?;
         while !Lock::sole_lock(lock_id, index_directory)? {
-            let path = Lock::lock_file_path(index_directory, lock_id);
-            std::fs::remove_file(path)?;
+            let path = Lock::lock_file_path(index_directory, lock_id)?;
+            path.remove_file()?;
             let sleep_duration = time::Duration::from_millis((OsRng.next_u32() % 256).into());
             thread::sleep(sleep_duration);
             Lock::create_lock_file(lock_id, index_directory)?;

@@ -48,13 +44,12 @@ impl Lock {
         Ok(())
     }

-    fn sole_lock(lock_id: Uuid, index_directory: &Path) -> Result<bool> {
-        let my_lock_file_path = Lock::lock_file_path(index_directory, lock_id);
+    fn sole_lock(lock_id: Uuid, index_directory: &VfsPath) -> Result<bool> {
+        let my_lock_file_path = Lock::lock_file_path(index_directory, lock_id)?;
         let locks = Lock::all_locks(index_directory)?;
         let mut only_mine = true;
         for path in locks {
-            let path = path?;
-            if path.to_string_lossy() != my_lock_file_path.to_string_lossy() {
+            if path != my_lock_file_path {
                 only_mine = false;
                 break;
             }

@@ -62,27 +57,25 @@ impl Lock {
         Ok(only_mine)
     }

-    fn all_locks(index_directory: &Path) -> Result<Paths> {
-        let locks_glob = Lock::locks_glob(index_directory);
-        Ok(glob(&locks_glob)?)
+    fn all_locks(index_directory: &VfsPath) -> Result<Vec<VfsPath>> {
+        Ok(index_directory
+            .read_dir()?
+            .into_iter()
+            .filter(|f| f.filename().ends_with(".lock"))
+            .collect())
     }

-    fn create_lock_file(lock_id: Uuid, index_directory: &Path) -> Result<()> {
-        let lock_file_path = Lock::lock_file_path(index_directory, lock_id);
-        let file = AtomicFile::new(lock_file_path, DisallowOverwrite);
-        match file.write(|f| f.write_all(lock_id.to_hyphenated().to_string().as_bytes())) {
-            Ok(_) => Ok(()),
-            Err(e) => Err(anyhow!("error acquiring lock: {}", e)),
-        }
+    fn create_lock_file(lock_id: Uuid, index_directory: &VfsPath) -> Result<()> {
+        let lock_file_path = Lock::lock_file_path(index_directory, lock_id)?;
+        let mut file = lock_file_path.create_file()?;
+        let lock_id_text = lock_id.to_hyphenated().to_string();
+        let lock_id_bytes = lock_id_text.as_bytes();
+        Ok(file.write_all(lock_id_bytes)?)
     }

-    fn lock_file_path(path: &Path, lock_id: Uuid) -> PathBuf {
-        let path_text = &format!("{}/{}.lock", path.to_string_lossy(), lock_id);
-        Path::new(path_text).to_path_buf()
-    }
-
-    fn locks_glob(path: &Path) -> String {
-        format!("{}/*.lock", path.to_string_lossy())
-    }
+    fn lock_file_path(path: &VfsPath, lock_id: Uuid) -> Result<VfsPath> {
+        let file_name = format!("{}.lock", lock_id);
+        Ok(path.join(&file_name)?)
+    }
 }

@@ -96,19 +89,17 @@ impl Drop for Lock {
 mod must {
     use super::Lock;
     use anyhow::Result;
-    use std::{fs, io};
+    use vfs::{MemoryFS, VfsPath};

     #[test]
     fn be_released_when_dropped() -> Result<()> {
-        let temp_dir = tempfile::tempdir()?;
+        let temp_dir: VfsPath = MemoryFS::new().into();
         {
-            let _lock = Lock::lock(&temp_dir.path());
+            let _lock = Lock::lock(&temp_dir);
         }
-        let entries = fs::read_dir(temp_dir.into_path())?
-            .map(|res| res.map(|e| e.path()))
-            .collect::<Result<Vec<_>, io::Error>>()?;
-        assert_eq!(entries.len(), 0);
+        let entries = temp_dir.read_dir()?.count();
+        assert_eq!(entries, 0);
         Ok(())
     }
 }
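`glob()` only works on the real filesystem, so lock discovery becomes `read_dir()` plus a filename filter. Worth noting: the atomic `DisallowOverwrite` creation is also gone, leaving the retry loop in `wait_to_have_sole_lock` as the guard against racing writers. The listing in isolation:

```rust
use vfs::{MemoryFS, VfsPath};

fn all_locks(dir: &VfsPath) -> anyhow::Result<Vec<VfsPath>> {
    Ok(dir
        .read_dir()?
        .filter(|f| f.filename().ends_with(".lock"))
        .collect())
}

fn main() -> anyhow::Result<()> {
    let dir: VfsPath = MemoryFS::new().into();
    dir.join("a.lock")?.create_file()?; // a lock file
    dir.join("index")?.create_file()?;  // unrelated data
    assert_eq!(all_locks(&dir)?.len(), 1);
    Ok(())
}
```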
@@ -1,52 +1,44 @@
 use std::collections::hash_map::Iter;
 use std::collections::HashMap;
-use std::path::Path;

 use serde::{Deserialize, Serialize};
+use vfs::VfsPath;

 use crate::index::item::IndexItem;
-use crate::repository::{item::RepositoryItem, ItemId};
+use crate::repository::ItemId;
 use crate::version::Version;
 use anyhow::Result;
-use anyhow::*;

 mod io;
-mod item;
+pub mod item;
 mod lock;

-#[derive(Serialize, Deserialize)]
+#[derive(Serialize, Deserialize, Debug)]
 pub struct Index {
     newest_items_by_source_path: HashMap<String, IndexItem>,
     items_by_file_id: HashMap<ItemId, IndexItem>,
-    index_path: String,
-    repository_path: String,
     version: Version,
 }

 impl Index {
-    pub fn new<T: AsRef<Path>>(repository_path: T) -> Self {
-        let repository_path = repository_path.as_ref();
-        Index {
+    pub fn new() -> Result<Self> {
+        Ok(Index {
             newest_items_by_source_path: Default::default(),
             items_by_file_id: Default::default(),
-            index_path: repository_path.join("index").to_string_lossy().to_string(),
-            repository_path: repository_path.to_string_lossy().to_string(),
             version: Version::default(),
-        }
+        })
     }

-    pub fn remember<S: AsRef<Path>, R: AsRef<Path>>(&mut self, original_source_path: S, relative_path: R, id: ItemId) {
-        let original_source_path = original_source_path.as_ref();
-        let relative_path = relative_path.as_ref();
+    pub fn remember(&mut self, original_source_path: &VfsPath, relative_path: &str, id: ItemId) {
         let item = if let Some(old) = self
             .newest_items_by_source_path
-            .get(&original_source_path.to_string_lossy().to_string())
+            .get(&original_source_path.as_str().to_string())
         {
-            old.next_version(id, relative_path.to_string_lossy().to_string())
+            old.next_version(id, relative_path.to_string())
         } else {
             IndexItem::from(
-                original_source_path.to_string_lossy().to_string(),
-                relative_path.to_string_lossy().to_string(),
+                original_source_path.as_str().to_string(),
+                relative_path.to_string(),
                 id,
                 Version::default(),
             )

@@ -54,34 +46,11 @@ impl Index {
         self.items_by_file_id.insert(item.id(), item.clone());
         self.newest_items_by_source_path
-            .insert(original_source_path.to_string_lossy().to_string(), item);
+            .insert(original_source_path.as_str().to_string(), item);
     }

-    pub fn repository_item(&self, i: &IndexItem) -> RepositoryItem {
-        let index_item = i.clone();
-        let relative_path = Path::new(index_item.relative_path());
-        let repository_path = Path::new(&self.repository_path);
-        let original_source_path = Path::new(index_item.original_source_path());
-        let absolute_path = repository_path.join(relative_path);
-        let absolute_path = absolute_path.as_path();
-        RepositoryItem::from(
-            original_source_path,
-            absolute_path,
-            relative_path,
-            index_item.id(),
-            index_item.version(),
-        )
-    }
-
-    pub fn newest_item_by_source_path<T: AsRef<Path>>(&self, path: T) -> Result<Option<IndexItem>> {
-        let path = path.as_ref();
-        if !path.is_absolute() {
-            return Err(anyhow!("repository path not absolute"));
-        }
-        Ok(self
-            .newest_items_by_source_path
-            .get(&path.to_string_lossy().to_string())
-            .cloned())
+    pub fn newest_item_by_source_path(&self, path: &VfsPath) -> Result<Option<IndexItem>> {
+        Ok(self.newest_items_by_source_path.get(&path.as_str().to_string()).cloned())
     }

     pub fn item_by_id(&self, id: &ItemId) -> Result<Option<IndexItem>> {

@@ -95,6 +64,7 @@ impl Index {
     }
 }

+#[derive(Debug)]
 pub struct IndexItemIterator<'a> {
     iterator: Iter<'a, String, IndexItem>,
 }
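With `index_path` and `repository_path` gone from the serialized struct, the index becomes location-independent, and its maps are keyed by `VfsPath::as_str()` (always valid UTF-8 in a virtual filesystem) instead of lossily-converted OS paths. The keying in miniature:

```rust
use std::collections::HashMap;
use vfs::{MemoryFS, VfsPath};

fn main() -> anyhow::Result<()> {
    let root: VfsPath = MemoryFS::new().into();
    let file = root.join("dir")?.join("file.txt")?;

    // Same keying scheme as the new Index maps.
    let mut newest_by_source_path: HashMap<String, u64> = HashMap::new();
    newest_by_source_path.insert(file.as_str().to_string(), 1);
    assert_eq!(newest_by_source_path.get(file.as_str()), Some(&1));
    Ok(())
}
```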
@@ -1,58 +1,67 @@
 use crate::{repository::ItemId, version::Version};
 use anyhow::Result;
 use anyhow::*;
+use nix::unistd::getpid;
+use std::fmt;
 use std::fmt::{Display, Formatter};
 use std::path::Path;
-use std::{fmt, fs};
+use vfs::VfsPath;

-#[derive(Clone, Debug, PartialOrd, PartialEq, Ord, Eq)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct RepositoryItem {
-    relative_path: Box<Path>,
-    absolute_path: Box<Path>,
-    original_source_path: Box<Path>,
+    relative_path: String,
+    absolute_path: VfsPath,
+    original_source_path: String,
     id: ItemId,
     version: Version,
 }

+impl PartialOrd for RepositoryItem {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        self.id.partial_cmp(&other.id)
+    }
+}

 impl RepositoryItem {
-    pub fn from(original_source_path: &Path, absolute_path: &Path, relative_path: &Path, id: ItemId, version: Version) -> Self {
+    pub fn from(
+        original_source_path: &str,
+        absolute_path: &VfsPath,
+        relative_path: &str,
+        id: ItemId,
+        version: Version,
+    ) -> Self {
         RepositoryItem {
-            relative_path: Box::from(relative_path),
-            absolute_path: Box::from(absolute_path),
-            original_source_path: Box::from(original_source_path),
+            relative_path: relative_path.to_string(),
+            absolute_path: absolute_path.clone(),
+            original_source_path: original_source_path.to_string(),
             id,
             version,
         }
     }

-    pub fn save(&self, save_to: &Path) -> Result<()> {
-        if !save_to.is_absolute() {
-            return Err(anyhow!("path to store not absolute"));
-        }
-        let target_path = save_to.join(&self.original_source_path.strip_prefix("/")?);
-        if !target_path.is_absolute() {
-            return Err(anyhow!("path to store not absolute"));
-        }
+    pub fn save(&self, save_to: &VfsPath) -> Result<()> {
+        let original_source_path = Path::new(self.original_source_path());
+        let source_path_relative = original_source_path.strip_prefix("/")?;
+        let source_path_relative = source_path_relative.to_string_lossy();
+        let target_path = save_to.join(&source_path_relative)?;
         let parent = target_path
             .parent()
-            .ok_or_else(|| anyhow!("cannot compute parent path for {}", &target_path.to_string_lossy()))?;
-        if !parent.exists() {
-            fs::create_dir_all(parent)?;
-        }
+            .ok_or_else(|| anyhow!("cannot compute parent path for {}", &target_path.as_str()))?;
+        log::debug!("[{}] saving data to {}", getpid(), target_path.as_str());
+        parent.create_dir_all()?;
         if !self.absolute_path.exists() {
             return Err(anyhow!("corrupted repository"));
         }
-        fs::copy(&self.absolute_path, &target_path)?;
+        self.absolute_path.copy_file(&target_path)?;
         Ok(())
     }

-    pub fn relative_path(&self) -> &Path {
+    pub fn relative_path(&self) -> &str {
         &self.relative_path
     }

-    pub fn original_source_path(&self) -> &Path {
+    pub fn original_source_path(&self) -> &str {
         &self.original_source_path
     }

@@ -67,11 +76,6 @@ impl RepositoryItem {
 impl Display for RepositoryItem {
     fn fmt(&self, f: &mut Formatter) -> fmt::Result {
-        write!(
-            f,
-            "'{}' : {}",
-            self.original_source_path().to_string_lossy(),
-            hex::encode(self.id())
-        )
+        write!(f, "'{}' : {}", self.original_source_path(), hex::encode(self.id()))
     }
 }
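`VfsPath` has no `strip_prefix`, so `save()` hops through `std::path::Path` to turn the absolute source path into a path relative to the restore target. The conversion on its own (the helper name is illustrative):

```rust
use std::path::Path;
use vfs::{MemoryFS, VfsPath};

fn target_for(save_to: &VfsPath, original_source_path: &str) -> anyhow::Result<VfsPath> {
    // Drop the leading "/" so the join lands inside save_to.
    let relative = Path::new(original_source_path).strip_prefix("/")?;
    Ok(save_to.join(&relative.to_string_lossy())?)
}

fn main() -> anyhow::Result<()> {
    let restore_root: VfsPath = MemoryFS::new().into();
    let target = target_for(&restore_root, "/home/user/file.txt")?;
    assert!(target.as_str().ends_with("user/file.txt"));
    Ok(())
}
```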
@@ -1,10 +1,9 @@
 pub mod item;

-use std::fmt::Formatter;
+use std::fmt::{Debug, Formatter};
-use std::fs::File;
 use std::io::BufReader;
-use std::path::{Path, PathBuf};
+use std::path::Path;
-use std::{fmt, fs, io};
+use std::{fmt, io};

 use crate::index::{Index, IndexItemIterator};
 use anyhow::Result;

@@ -13,28 +12,31 @@ use item::RepositoryItem;
 use serde::{Deserialize, Serialize};
 use sha2::Digest;
 use sha2::Sha512;
-use walkdir::WalkDir;
+use vfs::{VfsFileType, VfsPath};

 /// represents a place where backup is stored and can be restored from.
 /// right now only on-disk directory storage is supported
 /// repository always knows the newest version of the index and is responsible for syncing the index to disk
 /// and making sure that different threads can access index in parallel
+#[derive(Debug)]
 pub struct Repository {
-    /// absolute path to where the repository is stored on disk
+    /// path to where the repository is stored on disk
-    path: PathBuf,
+    path: VfsPath,
     index: Index,
 }

 const DATA_DIR_NAME: &str = "data";

-#[derive(Clone, Debug, PartialOrd, PartialEq, Ord, Eq, Serialize, Deserialize, Hash)]
+#[derive(Clone, PartialOrd, PartialEq, Ord, Eq, Serialize, Deserialize, Hash)]
 pub struct ItemId(#[serde(with = "base64")] Vec<u8>);

+#[derive(Debug)]
 pub struct RepositoryItemIterator<'a> {
+    repository: &'a Repository,
     iterator: IndexItemIterator<'a>,
-    index: &'a Index,
 }

+//TODO: move to serializers::base64
 mod base64 {
     use ::base64;
     use serde::{de, Deserialize, Deserializer, Serializer};

@@ -59,7 +61,11 @@ impl<'a> Iterator for RepositoryItemIterator<'a> {
     type Item = RepositoryItem;

     fn next(&mut self) -> Option<Self::Item> {
-        self.iterator.next().map(|i| self.index.repository_item(&i))
+        let item = self.iterator.next();
+        match item {
+            None => None,
+            Some(item) => self.repository.repository_item(&item).ok(),
+        }
     }
 }

@@ -81,91 +87,115 @@ impl fmt::Display for ItemId {
     }
 }

+impl Debug for ItemId {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", hex::encode(self))
+    }
+}

 impl<'a> Repository {
-    pub fn init<T: AsRef<Path>>(path: T) -> Result<()> {
-        let mut index = Index::new(path.as_ref());
-        index.save()?;
-        Ok(())
+    pub fn init(path: &VfsPath) -> Result<Repository> {
+        path.create_dir_all()?;
+        let mut index = Index::new()?;
+        index.save(path)?;
+        let repository = Repository::open(path)?;
+        repository.data_dir()?.create_dir_all()?;
+        Ok(repository)
     }

-    pub fn open<T: AsRef<Path>>(path: T) -> Result<Repository> {
-        let path = path.as_ref();
-        if !path.is_absolute() {
-            return Err(anyhow!("path to repository not absolute"));
-        }
-
+    pub fn open(path: &VfsPath) -> Result<Repository> {
         let index = Index::load(path)?;
-        let mut repository_path = PathBuf::new();
-        repository_path.push(path);
-        Ok(Repository {
-            path: repository_path,
-            index,
-        })
+        let repository = Repository {
+            path: path.clone(),
+            index,
+        };
+        Ok(repository)
     }

-    pub fn path(&self) -> &Path {
+    pub fn path(&self) -> &VfsPath {
         &self.path
     }

     pub fn save_index(&mut self) -> Result<()> {
-        self.index.save()
+        self.index.save(&self.path)
     }

-    pub fn store(&mut self, source_path: &Path) -> Result<()> {
-        if !source_path.is_absolute() {
-            return Err(anyhow!("path to store not absolute"));
-        }
+    pub fn store(&mut self, source_path: &VfsPath) -> Result<()> {
         let id = Repository::calculate_id(source_path)?;
-        let destination_path = self.data_dir();
-        let destination_path = destination_path.join(id.to_string());
-        let destination_path = Path::new(&destination_path);
-        if source_path.is_file() {
-            let parent = destination_path
-                .parent()
-                .ok_or_else(|| anyhow!("cannot compute parent path for {}", &destination_path.to_string_lossy()))?;
-            fs::create_dir_all(parent)?;
-            fs::copy(source_path, destination_path)?;
-            let relative_path = destination_path.strip_prefix(&self.path)?;
-            self.index.remember(source_path, relative_path, id);
-        }
+        let destination = self.data_dir()?;
+        let destination = destination.join(&id.to_string())?;
+        if source_path.metadata()?.file_type != VfsFileType::File {
+            return Ok(());
+        }
+        let parent = destination
+            .parent()
+            .ok_or_else(|| anyhow!("cannot compute parent path for {}", &destination.as_str()))?;
+        parent.create_dir_all()?;
+        if !destination.exists() {
+            source_path.copy_file(&destination)?;
+        }
+        let destination_path = Path::new(destination.as_str());
+        let relative_path = destination_path.strip_prefix(&self.path.as_str())?.to_string_lossy();
+        self.index.remember(source_path, &relative_path, id);
         Ok(())
     }

-    pub fn newest_item_by_source_path(&self, path: &Path) -> Result<Option<RepositoryItem>> {
-        Ok(self
-            .index
-            .newest_item_by_source_path(path)?
-            .map(|i| self.index.repository_item(&i)))
+    pub fn newest_item_by_source_path(&self, path: &VfsPath) -> Result<Option<RepositoryItem>> {
+        let item = self.index.newest_item_by_source_path(path)?;
+        match item {
+            None => Ok(None),
+            Some(item) => Ok(Some(self.repository_item(&item)?)),
+        }
     }

     pub fn item_by_id(&self, id: &ItemId) -> Result<Option<RepositoryItem>> {
-        self.index.item_by_id(id).map(|i| i.map(|i| self.index.repository_item(&i)))
+        let item = self.index.item_by_id(id)?;
+        match item {
+            None => Ok(None),
+            Some(item) => Ok(Some(self.repository_item(&item)?)),
+        }
     }

     pub fn newest_items(&self) -> RepositoryItemIterator {
         RepositoryItemIterator {
+            repository: &self,
             iterator: self.index.newest_items(),
-            index: &self.index,
         }
     }

+    pub fn repository_item(&self, i: &crate::index::item::IndexItem) -> Result<RepositoryItem> {
+        let index_item = i.clone();
+        let relative_path = index_item.relative_path();
+        let repository_path = self.path();
+        let original_source_path = index_item.original_source_path();
+        let absolute_path = repository_path.join(relative_path)?;
+        Ok(RepositoryItem::from(
+            &original_source_path,
+            &absolute_path,
+            relative_path,
+            index_item.id(),
+            index_item.version(),
+        ))
+    }

     pub fn data_weight(&self) -> Result<u64> {
-        let total_size = WalkDir::new(self.data_dir())
-            .into_iter()
+        let walkdir = self.data_dir()?.walk_dir()?;
+        let total_size = walkdir
             .filter_map(|entry| entry.ok())
             .filter_map(|entry| entry.metadata().ok())
-            .filter(|metadata| metadata.is_file())
-            .fold(0, |acc, m| acc + m.len());
+            .filter(|metadata| metadata.file_type == VfsFileType::File)
+            .fold(0, |acc, m| acc + m.len);
         Ok(total_size)
     }

-    fn data_dir(&self) -> PathBuf {
-        self.path().join(DATA_DIR_NAME)
+    fn data_dir(&self) -> Result<VfsPath> {
+        Ok(self.path().join(DATA_DIR_NAME)?)
     }

-    fn calculate_id(source_path: &Path) -> Result<ItemId> {
-        let source_file = File::open(source_path)?;
+    fn calculate_id(source_path: &VfsPath) -> Result<ItemId> {
+        let source_file = source_path.open_file()?;
         let mut reader = BufReader::new(source_file);
         let mut hasher = Sha512::new();

@@ -180,23 +210,23 @@ mod must {
     use super::Repository;
     use crate::test::source::TestSource;
     use anyhow::Result;
-    use tempfile::tempdir;
+    use vfs::MemoryFS;

     #[test]
     fn have_size_equal_to_sum_of_sizes_of_backed_up_files() -> Result<()> {
         let file_size1 = 13;
         let file_size2 = 27;
         let source = TestSource::new()?;
-        let repository_path = tempdir()?.into_path();
+        let repository_path = MemoryFS::new().into();
         Repository::init(&repository_path)?;

         let mut backup_repository = Repository::open(&repository_path)?;
         source.write_random_bytes_to_file("file1", file_size1)?;
-        backup_repository.store(&source.file_path("file1"))?;
+        backup_repository.store(&source.file_path("file1")?)?;

         source.write_random_bytes_to_file("file2", file_size2)?;
-        backup_repository.store(&source.file_path("file2"))?;
+        backup_repository.store(&source.file_path("file2")?)?;

         assert_eq!(file_size1 + file_size2, backup_repository.data_weight()?);
         Ok(())
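The repository stays content-addressed: `calculate_id` hashes file contents with SHA-512 (sha2 0.9, per Cargo.toml), and the blob is copied under `data/` only if that id is not already present. A sketch of the hashing step in isolation:

```rust
use sha2::{Digest, Sha512};
use std::io::{Read, Write};
use vfs::{MemoryFS, VfsPath};

fn item_id(path: &VfsPath) -> anyhow::Result<String> {
    let mut reader = path.open_file()?;
    let mut hasher = Sha512::new();
    let mut buffer = [0u8; 8192];
    loop {
        // Stream the file through the hasher in fixed-size chunks.
        let read = reader.read(&mut buffer)?;
        if read == 0 {
            break;
        }
        hasher.update(&buffer[..read]);
    }
    Ok(hex::encode(hasher.finalize()))
}

fn main() -> anyhow::Result<()> {
    let root: VfsPath = MemoryFS::new().into();
    let file = root.join("file")?;
    file.create_file()?.write_all(b"hello")?;
    println!("{}", item_id(&file)?);
    Ok(())
}
```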
@@ -1,24 +1,20 @@
-use std::path::Path;
-
 use crate::repository::{item::RepositoryItem, Repository};
 use anyhow::Result;
-use anyhow::*;
+use vfs::VfsPath;

 pub struct Engine<'a> {
     repository: &'a mut Repository,
-    target_path: &'a Path,
+    target_path: &'a VfsPath,
 }

 impl<'a> Engine<'a> {
-    pub fn new(repository: &'a mut Repository, target_path: &'a Path) -> Result<Self> {
-        if !target_path.is_absolute() {
-            return Err(anyhow!("path to store not absolute"));
-        }
+    pub fn new(repository: &'a mut Repository, target_path: &'a VfsPath) -> Result<Self> {
         Ok(Engine { repository, target_path })
     }

     pub fn restore_all(&mut self) -> Result<()> {
-        for item in self.repository.newest_items() {
+        let newest_items = self.repository.newest_items();
+        for item in newest_items {
             self.restore(&item)?;
         }
         self.repository.save_index()?;
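The `is_absolute()` guard disappears because it has nothing left to check: a `VfsPath` is always anchored at the root of its own filesystem, so a "relative" target cannot be constructed. For instance:

```rust
use vfs::{MemoryFS, VfsPath};

fn main() -> anyhow::Result<()> {
    let root: VfsPath = MemoryFS::new().into();
    // Joins always produce paths rooted in the owning filesystem.
    let target = root.join("restore")?.join("here")?;
    assert!(target.as_str().starts_with('/'));
    Ok(())
}
```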
@@ -1,166 +1,180 @@
-use std::fs::File;
-use std::io::Read;
-use std::path::Path;
-
-use tempfile::tempdir;
-use walkdir::WalkDir;
-
-use super::source::TestSource;
-use crate::repository::{item::RepositoryItem, ItemId, Repository};
-use crate::{backup, restore};
-use anyhow::Result;
-
-pub fn assert_same_after_restore(source_path: &Path) -> Result<()> {
-    let repository_path = tempdir().unwrap().into_path();
-    let restore_target = tempdir().unwrap().into_path();
-
-    assert_ne!(source_path, repository_path);
-    assert_ne!(repository_path, restore_target);
-
-    Repository::init(repository_path.as_path())?;
-    {
-        let mut backup_repository = Repository::open(repository_path.as_path())?;
-        let mut backup_engine = backup::Engine::new(source_path, &mut backup_repository)?;
-        backup_engine.backup()?;
-    }
-    {
-        let mut restore_repository = Repository::open(repository_path.as_path())?;
-        let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
-        restore_engine.restore_all()?;
-    }
-
-    assert_directory_trees_have_same_contents(source_path, restore_target.as_path())?;
-    Ok(())
-}
-
-pub fn assert_restored_file_contents(repository_path: &Path, source_file_full_path: &Path, contents: &[u8]) -> Result<()> {
-    let mut restore_repository = Repository::open(repository_path)?;
-    let item = restore_repository.newest_item_by_source_path(&source_file_full_path)?;
-    let restore_target = tempdir().unwrap();
-    let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?;
-    restore_engine.restore(&item.unwrap())?;
-    let restored_file_path = restore_target.path().join(source_file_full_path.strip_prefix("/")?);
-    assert_target_file_contents(&restored_file_path, contents)
-}
-
-pub fn assert_restored_file_byte_contents(repository_path: &Path, source_file_full_path: &Path, contents: &[u8]) -> Result<()> {
-    let mut restore_repository = Repository::open(repository_path)?;
-    let item = restore_repository.newest_item_by_source_path(&source_file_full_path)?;
-    let restore_target = tempdir().unwrap();
-    let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?;
-    restore_engine.restore(&item.unwrap())?;
-    let restored_file_path = restore_target.path().join(source_file_full_path.strip_prefix("/")?);
-    assert_target_file_contents(&restored_file_path, contents)
-}
-
-pub fn assert_restored_from_version_has_contents(
-    repository_path: &Path,
-    source_file_full_path: &Path,
-    old_contents: &[u8],
-    old_id: &ItemId,
-) -> Result<()> {
-    let mut restore_repository = Repository::open(repository_path)?;
-    let old_item = restore_repository.item_by_id(&old_id)?;
-    let restore_target = tempdir().unwrap();
-    let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target.path())?;
-    restore_engine.restore(&old_item.unwrap())?;
-    let restored_file_path = restore_target.path().join(source_file_full_path.strip_prefix("/")?);
-    assert_target_file_contents(&restored_file_path, old_contents)
-}
-
-pub fn newest_item(repository_path: &Path, source_file_full_path: &Path) -> Result<RepositoryItem> {
-    let item = {
-        let reading_repository = Repository::open(repository_path)?;
-        let item = reading_repository.newest_item_by_source_path(&source_file_full_path)?;
-        assert!(item.is_some());
-        item.unwrap()
-    };
-    Ok(item)
-}
-
-pub fn restore_all_from_reloaded_repository(repository_path: &Path, restore_target: &Path) -> Result<()> {
-    {
-        let mut restore_repository = Repository::open(repository_path)?;
-        let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
-        restore_engine.restore_all()?;
-        Ok(())
-    }
-}
-
-pub fn backup_file_with_text_contents(
-    source: &TestSource,
-    repository_path: &Path,
-    source_file_relative_path: &str,
-    contents: &str,
-) -> Result<()> {
-    {
-        backup_file_with_byte_contents(source, repository_path, source_file_relative_path, contents.as_bytes())
-    }
-}
-
-pub fn backup_file_with_byte_contents(
-    source: &TestSource,
-    repository_path: &Path,
-    source_file_relative_path: &str,
-    contents: &[u8],
-) -> Result<()> {
-    {
-        let mut backup_repository = Repository::open(repository_path)?;
-        let mut backup_engine = backup::Engine::new(source.path(), &mut backup_repository)?;
-        source.write_bytes_to_file(source_file_relative_path, contents).unwrap();
-        backup_engine.backup()?;
-        Ok(())
-    }
-}
-
-pub fn data_weight(repository_path: &Path) -> Result<u64> {
-    {
-        let repository = Repository::open(repository_path)?;
-        Ok(repository.data_weight()?)
-    }
-}
-
-fn assert_directory_trees_have_same_contents(left: &Path, right: &Path) -> Result<()> {
-    let left_files = get_sorted_files_recursively(left)?;
-    let right_files = get_sorted_files_recursively(right)?;
-
-    let pairs = left_files.iter().zip(right_files);
-    for (l, r) in pairs {
-        assert_eq!(l.file_name(), r.file_name());
-        let mut fl = File::open(l).unwrap();
-        let mut fr = File::open(r).unwrap();
-        let mut bl = vec![];
-        let mut br = vec![];
-        fl.read_to_end(&mut bl).unwrap();
-        fr.read_to_end(&mut br).unwrap();
-        assert_eq!(bl, br);
-    }
-    Ok(())
-}
-
-pub fn get_sorted_files_recursively<T: AsRef<Path>>(path: T) -> Result<Vec<Box<Path>>> {
-    let walker = WalkDir::new(path.as_ref()).sort_by(|a, b| a.file_name().cmp(b.file_name()));
-
-    let mut result = vec![];
-
-    for maybe_entry in walker {
-        let entry = maybe_entry?;
-        if entry.path() == path.as_ref() {
-            continue;
-        }
-        if entry.path().is_file() {
-            result.push(Box::from(entry.path()));
-        }
-    }
-
-    Ok(result)
-}
-
-fn assert_target_file_contents(restored_path: &Path, expected_contents: &[u8]) -> Result<()> {
-    let mut actual_contents = vec![];
-    assert!(restored_path.exists(), "Expected '{}' to be there", restored_path.display());
-    File::open(restored_path)?.read_to_end(&mut actual_contents)?;
-    assert_eq!(expected_contents, actual_contents);
-    Ok(())
-}
+pub mod in_memory {
+    use std::path::Path;
+
+    use crate::{
+        backup,
+        repository::{item::RepositoryItem, ItemId, Repository},
+        restore,
+        test::source::TestSource,
+    };
+    use anyhow::Result;
+    use vfs::{MemoryFS, VfsFileType, VfsPath};
+    use rand::Rng;
+
+    pub fn random_in_memory_path(prefix: &str) -> Result<VfsPath> {
+        let path: VfsPath = MemoryFS::new().into();
+        let path = path.join(&format!("{}-{}", prefix, rand::thread_rng().gen::<u64>()))?;
+        Ok(path)
+    }
+
+    pub fn assert_same_after_restore(source_path: &VfsPath) -> Result<()> {
+        let repository_path: VfsPath = random_in_memory_path("repository")?;
+        let restore_target: VfsPath = random_in_memory_path("target")?;
+
+        assert_ne!(source_path, &repository_path);
+        assert_ne!(repository_path, restore_target);
+
+        Repository::init(&repository_path)?;
+        {
+            let mut backup_repository = Repository::open(&repository_path)?;
+            let mut backup_engine = backup::Engine::new(source_path, &mut backup_repository)?;
+            backup_engine.backup()?;
+        }
+        {
+            let mut restore_repository = Repository::open(&repository_path)?;
+            let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
+            restore_engine.restore_all()?;
+        }
+
+        assert_directory_trees_have_same_contents(source_path, &restore_target)?;
+        Ok(())
+    }
+
+    pub fn assert_restored_file_contents(
+        repository_path: &VfsPath,
+        source_file_full_path: &VfsPath,
+        contents: &[u8],
+    ) -> Result<()> {
+        let mut restore_repository = Repository::open(repository_path)?;
+        let item = restore_repository.newest_item_by_source_path(&source_file_full_path)?;
+        let restore_target = random_in_memory_path("target")?;
+        let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
+
+        restore_engine.restore(&item.unwrap())?;
+        let source_file_relative_path = Path::new(source_file_full_path.as_str()).strip_prefix("/")?;
+        let restored_file_path = restore_target.join(&source_file_relative_path.to_string_lossy())?;
+        assert_target_file_contents(&restored_file_path, contents)
+    }
+
+    pub fn assert_restored_from_version_has_contents(
+        repository_path: &VfsPath,
+        source_file_full_path: &VfsPath,
+        old_contents: &[u8],
+        old_id: &ItemId,
+    ) -> Result<()> {
+        let mut restore_repository = Repository::open(repository_path)?;
+        let old_item = restore_repository.item_by_id(&old_id)?;
+        let restore_target = random_in_memory_path("target")?;
+        let restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
+        restore_engine.restore(&old_item.unwrap())?;
+        let source_file_relative_path = Path::new(source_file_full_path.as_str()).strip_prefix("/")?;
+        let restored_file_path = restore_target.join(&source_file_relative_path.to_string_lossy())?;
+        assert_target_file_contents(&restored_file_path, old_contents)
+    }
+
+    pub fn newest_item(repository_path: &VfsPath, source_file_full_path: &VfsPath) -> Result<RepositoryItem> {
+        let item = {
+            let reading_repository = Repository::open(repository_path)?;
+            let item = reading_repository.newest_item_by_source_path(&source_file_full_path)?;
+            assert!(item.is_some());
+            item.unwrap()
+        };
+        Ok(item)
+    }
+
+    pub fn restore_all_from_reloaded_repository(repository_path: &VfsPath, restore_target: &VfsPath) -> Result<()> {
+        {
+            let mut restore_repository = Repository::open(repository_path)?;
+            let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
+            restore_engine.restore_all()?;
+            Ok(())
+        }
+    }
+
+    pub fn backup_file_with_text_contents(
+        source: &TestSource,
+        repository_path: &VfsPath,
+        source_file_relative_path: &str,
+        contents: &str,
+    ) -> Result<()> {
+        {
+            backup_file_with_byte_contents(source, repository_path, source_file_relative_path, contents.as_bytes())
+        }
+    }
+
+    pub fn backup_file_with_byte_contents(
+        source: &TestSource,
+        repository_path: &VfsPath,
+        source_file_relative_path: &str,
+        contents: &[u8],
+    ) -> Result<()> {
+        {
+            let mut backup_repository = Repository::open(repository_path)?;
+            let mut backup_engine = backup::Engine::new(source.path(), &mut backup_repository)?;
+            source.write_bytes_to_file(source_file_relative_path, contents).unwrap();
+            backup_engine.backup()?;
+            Ok(())
+        }
+    }
+
+    pub fn data_weight(repository_path: &VfsPath) -> Result<u64> {
+        {
+            let repository = Repository::open(repository_path)?;
+            Ok(repository.data_weight()?)
+        }
+    }
+
+    fn assert_directory_trees_have_same_contents(left: &VfsPath, right: &VfsPath) -> Result<()> {
+        let left_files = get_sorted_files_recursively(left)?;
+        let right_files = get_sorted_files_recursively(right)?;
+
+        let pairs = left_files.iter().zip(right_files);
+        for (l, r) in pairs {
+            assert_eq!(l.filename(), r.filename());
+            let mut fl = l.open_file()?;
+            let mut fr = r.open_file()?;
+            let mut bl = vec![];
+            let mut br = vec![];
+            fl.read_to_end(&mut bl).unwrap();
+            fr.read_to_end(&mut br).unwrap();
+            assert_eq!(bl, br);
+        }
+        Ok(())
+    }
+
+    pub fn get_sorted_files_recursively(path: &VfsPath) -> Result<Vec<VfsPath>> {
+        assert!(
+            path.exists(),
+            "[get_sorted_files_recursively] invoked on a path that does not exist: {:?}",
+            path
+        );
+        let walker = path.walk_dir()?;
+
+        let mut result = vec![];
+
+        for maybe_entry in walker {
+            let entry = &maybe_entry?;
+            if entry == path {
+                continue;
+            }
+            if entry.metadata()?.file_type == VfsFileType::File {
+                result.push(entry.clone());
+            }
+        }
+
+        result.sort_by(|a, b| a.filename().cmp(&b.filename()));
+
+        Ok(result)
+    }
+
+    fn assert_target_file_contents(restored_path: &VfsPath, expected_contents: &[u8]) -> Result<()> {
+        let mut actual_contents = vec![];
+        assert!(restored_path.exists(), "Expected '{:?}' to be there", restored_path);
+        restored_path.open_file()?.read_to_end(&mut actual_contents)?;
+        assert_eq!(expected_contents, actual_contents);
+        Ok(())
+    }
+}
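These helpers are where the speedup comes from: an entire backup/restore round trip now runs in RAM. A condensed sketch of the round trip, using the crate's public paths (`bakare::backup`, `bakare::restore`, `bakare::repository::Repository`) exactly as the tests in this commit use them:

```rust
use bakare::repository::Repository;
use bakare::{backup, restore};
use std::io::Write;
use vfs::{MemoryFS, VfsPath};

fn main() -> anyhow::Result<()> {
    // Three independent in-memory filesystems: source, repository, target.
    let source: VfsPath = MemoryFS::new().into();
    let repository_path: VfsPath = MemoryFS::new().into();
    let target: VfsPath = MemoryFS::new().into();
    source.join("file.txt")?.create_file()?.write_all(b"data")?;

    Repository::init(&repository_path)?;
    {
        let mut repository = Repository::open(&repository_path)?;
        let mut engine = backup::Engine::new(&source, &mut repository)?;
        engine.backup()?;
    }
    let mut repository = Repository::open(&repository_path)?;
    let mut engine = restore::Engine::new(&mut repository, &target)?;
    engine.restore_all()?;
    assert_eq!(target.join("file.txt")?.read_to_string()?, "data");
    Ok(())
}
```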
@@ -1,45 +1,54 @@
-use std::fs::File;
-use std::io::Error;
 use std::io::Write;
-use std::path::Path;
-use std::path::PathBuf;
 
-use tempfile::tempdir;
-use tempfile::TempDir;
+use anyhow::Result;
+use vfs::VfsPath;
+
+use super::assertions::in_memory::random_in_memory_path;
 
 pub struct TestSource {
-    directory: TempDir,
+    directory: VfsPath,
 }
 
 impl TestSource {
-    pub fn new() -> Result<Self, Error> {
-        Ok(Self { directory: tempdir()? })
+    pub fn new() -> Result<Self> {
+        let path: VfsPath = random_in_memory_path("testsource")?;
+        path.create_dir_all()?;
+        Ok(Self { directory: path })
     }
 
-    pub fn write_bytes_to_file(&self, filename: &str, bytes: &[u8]) -> Result<(), Error> {
-        let path = self.file_path(filename);
-        Ok(File::create(path)?.write_all(bytes)?)
+    pub fn write_bytes_to_file(&self, filename: &str, bytes: &[u8]) -> Result<()> {
+        let path = self.file_path(filename)?;
+        let mut file = path.create_file()?;
+        file.write_all(bytes)?;
+        dbg!(format!("wrote bytes under {}", filename));
+        Ok(())
    }
 
-    pub fn write_text_to_file(&self, filename: &str, text: &str) -> Result<(), Error> {
+    pub fn write_text_to_file(&self, filename: &str, text: &str) -> Result<()> {
         self.write_bytes_to_file(filename, text.as_bytes())
     }
 
-    pub fn write_random_bytes_to_file(&self, filename: &str, size: u64) -> Result<(), Error> {
+    pub fn write_random_bytes_to_file(&self, filename: &str, size: u64) -> Result<()> {
         let random_bytes: Vec<u8> = (0..size).map(|_| rand::random::<u8>()).collect();
         self.write_bytes_to_file(filename, &random_bytes)?;
         Ok(())
     }
 
-    pub fn path(&self) -> &Path {
-        self.directory.path()
+    pub fn path(&self) -> &VfsPath {
+        &self.directory
     }
 
-    pub fn file_path(&self, filename: &str) -> PathBuf {
-        self.directory.path().join(filename)
+    pub fn file_path(&self, filename: &str) -> Result<VfsPath> {
+        let file_path = self.directory.join(filename)?;
+        Ok(file_path)
     }
 }
+
+impl Drop for TestSource {
+    fn drop(&mut self) {
+        let _ = self.path().remove_dir_all();
+    }
+}
 #[cfg(test)]
 mod must {
     use super::TestSource;
@@ -51,7 +60,7 @@ mod must {
         {
             let source = TestSource::new()?;
             source.write_random_bytes_to_file("somefile", 1)?;
-            path = source.path().to_path_buf();
+            path = source.path().clone();
         }
 
         assert!(!path.exists());
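`random_in_memory_path` itself is not part of this diff view. A plausible shape for it — an assumption, built on the same vfs 0.4 API the commit uses elsewhere (`MemoryFS`, `.into()`, `join`) plus the crate's uuid dependency — would be:

use anyhow::Result;
use uuid::Uuid;
use vfs::{MemoryFS, VfsPath};

// Hypothetical sketch of the helper imported above: a uniquely named
// path on a fresh in-memory filesystem, so tests never touch the disk
// and never collide with each other.
pub fn random_in_memory_path(prefix: &str) -> Result<VfsPath> {
    let root: VfsPath = MemoryFS::new().into();
    let path = root.join(&format!("{}-{}", prefix, Uuid::new_v4()))?;
    Ok(path)
}

This is also what keeps the `Drop` impl above cheap: removing the whole tree only touches memory.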
@@ -1,50 +1,65 @@
 #[cfg(test)]
 mod must {
-    use std::fs;
-    use std::path::Path;
-
     use anyhow::Result;
-    use bakare::repository::Repository;
-    use bakare::test::{assertions::*, source::TestSource};
+    use bakare::test::source::TestSource;
     use bakare::{backup, restore};
-    use nix::sys::wait::{waitpid, WaitStatus};
+    use bakare::{repository::Repository, test::assertions::in_memory::*};
     use nix::unistd::{fork, ForkResult};
-    use tempfile::tempdir;
+    use nix::{
+        sys::wait::{waitpid, WaitStatus},
+        unistd::getpid,
+    };
+    use vfs::{PhysicalFS, VfsPath};
 
     #[test]
     fn handle_concurrent_backups() -> Result<()> {
         setup_logger();
-        let repository_path = &tempdir().unwrap().into_path();
-        Repository::init(repository_path)?;
+
+        let repository_directory = tempfile::tempdir()?.into_path();
+        let repository_path: VfsPath = PhysicalFS::new(repository_directory).into();
+        let repository_path = repository_path.join(&format!("repository-{}", getpid()))?;
+        Repository::init(&repository_path)?;
 
         let parallel_backups_number = 16;
         let files_per_backup_number = 16;
         let total_number_of_files = parallel_backups_number * files_per_backup_number;
-        let finished_backup_runs = backup_in_parallel(repository_path, parallel_backups_number, files_per_backup_number)?;
-        assert_eq!(finished_backup_runs.len(), parallel_backups_number);
 
-        let all_restored_files = restore_all(repository_path)?;
+        let finished_backup_runs = backup_in_parallel(&repository_path, parallel_backups_number, files_per_backup_number)?;
+        assert_eq!(finished_backup_runs.len(), parallel_backups_number);
+        assert!(data_weight(&repository_path)? > 0);
+
+        let target_directory = tempfile::tempdir()?.into_path();
+        let target_path: VfsPath = PhysicalFS::new(target_directory).into();
+        let target_path = target_path.join(&format!("target-{}", getpid()))?;
+        let all_restored_files = restore_all(&repository_path, &target_path)?;
         assert_eq!(all_restored_files.len(), total_number_of_files);
 
+        assert_all_files_in_place(parallel_backups_number, files_per_backup_number, &all_restored_files)?;
+        Ok(())
+    }
+
+    fn assert_all_files_in_place(
+        parallel_backups_number: usize,
+        files_per_backup_number: usize,
+        all_restored_files: &[VfsPath],
+    ) -> Result<()> {
         for i in 0..parallel_backups_number {
             for j in 0..files_per_backup_number {
                 let id = file_id(i, j);
-                let file = all_restored_files.iter().find(|f| f.ends_with(id.clone()));
+                let file = all_restored_files.iter().find(|f| f.filename() == id);
                 assert!(file.unwrap().exists(), "file {:?} does not exist", file);
-                let contents = fs::read_to_string(file.unwrap()).unwrap();
+                let contents = file.unwrap().read_to_string()?;
                 assert_eq!(id.to_string(), contents.to_owned());
             }
         }
         Ok(())
     }
 
-    fn backup_in_parallel<T>(
-        repository_path: T,
+    fn backup_in_parallel(
+        repository_path: &VfsPath,
         parallel_backups_number: usize,
         files_per_backup_number: usize,
-    ) -> Result<Vec<usize>>
-    where
-        T: AsRef<Path> + Sync,
-    {
+    ) -> Result<Vec<usize>> {
         let task_numbers = (0..parallel_backups_number).collect::<Vec<_>>();
         let mut child_pids = vec![];
         for task_number in &task_numbers {
@@ -73,11 +88,8 @@ mod must {
         Ok(task_numbers)
     }
 
-    fn backup_process<T>(task_number: usize, repository_path: T, files_per_backup_number: usize) -> Result<()>
-    where
-        T: AsRef<Path> + Sync,
-    {
-        let mut repository = Repository::open(repository_path.as_ref())?;
+    fn backup_process(task_number: usize, repository_path: &VfsPath, files_per_backup_number: usize) -> Result<()> {
+        let mut repository = Repository::open(repository_path)?;
         let source = TestSource::new().unwrap();
         let mut backup_engine = backup::Engine::new(source.path(), &mut repository)?;
         for i in 0..files_per_backup_number {
@@ -88,10 +100,9 @@ mod must {
         Ok(())
     }
 
-    fn restore_all<T: AsRef<Path>>(repository_path: T) -> Result<Vec<Box<Path>>> {
-        let restore_target = tempdir().unwrap().into_path();
-        let mut restore_repository = Repository::open(repository_path.as_ref())?;
-        let mut restore_engine = restore::Engine::new(&mut restore_repository, restore_target.as_ref())?;
+    fn restore_all(repository_path: &VfsPath, restore_target: &VfsPath) -> Result<Vec<VfsPath>> {
+        let mut restore_repository = Repository::open(repository_path)?;
+        let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
         restore_engine.restore_all()?;
         get_sorted_files_recursively(&restore_target)
     }
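The fork/wait plumbing between these hunks is elided by the diff view. Judging only from the imports (`fork`, `ForkResult`, `waitpid`, `WaitStatus`), the pattern is presumably one child process per backup run whose exit status the parent checks — an assumed sketch, not the crate's actual helpers:

use anyhow::{anyhow, Result};
use nix::sys::wait::{waitpid, WaitStatus};
use nix::unistd::{fork, ForkResult, Pid};

// Assumed sketch: run `work` in a forked child, reporting success or
// failure to the parent through the process exit code.
fn run_in_child(work: impl FnOnce() -> Result<()>) -> Result<Pid> {
    match unsafe { fork() }? {
        ForkResult::Child => {
            let code = if work().is_ok() { 0 } else { 1 };
            std::process::exit(code);
        }
        ForkResult::Parent { child } => Ok(child),
    }
}

// The parent waits for each child pid and treats anything other than a
// clean zero exit as a failed backup run.
fn wait_for_child(pid: Pid) -> Result<()> {
    match waitpid(pid, None)? {
        WaitStatus::Exited(_, 0) => Ok(()),
        other => Err(anyhow!("backup process failed: {:?}", other)),
    }
}

Note that forked children do not share an in-memory filesystem with the parent, which would explain why this one test keeps `PhysicalFS` while the rest of the suite moves to memory.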
@@ -1,17 +1,15 @@
 #[cfg(test)]
 mod must {
-    use tempfile::tempdir;
-
-    use bakare::repository::Repository;
-    use bakare::test::{assertions::*, source::TestSource};
+    use bakare::test::assertions::in_memory::*;
+    use bakare::{repository::Repository, test::source::TestSource};
 
     use proptest::prelude::*;
     proptest! {
         #[test]
         fn store_duplicated_files_just_once(contents in any::<[u8;3]>()) {
             let source = TestSource::new().unwrap();
-            let repository_path = &tempdir().unwrap().into_path();
-            Repository::init(repository_path).unwrap();
+            let repository_path = random_in_memory_path("repository").unwrap();
+            Repository::init(&repository_path).unwrap();
             assert_eq!(data_weight(&repository_path).unwrap(), 0);
 
             backup_file_with_byte_contents(&source, &repository_path, "1", &contents).unwrap();
@@ -22,8 +20,8 @@ mod must {
             let second_weight = data_weight(&repository_path).unwrap();
             assert_eq!(first_weight, second_weight);
 
-            assert_restored_file_contents(repository_path, &source.file_path("1"), &contents).unwrap();
-            assert_restored_file_contents(repository_path, &source.file_path("2"), &contents).unwrap();
+            assert_restored_file_contents(&repository_path, &source.file_path("1").unwrap(), &contents).unwrap();
+            assert_restored_file_contents(&repository_path, &source.file_path("2").unwrap(), &contents).unwrap();
         }
     }
 }
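`data_weight` is likewise only visible through its call sites. Judging from how the test uses it (zero for a fresh repository, unchanged after backing up a duplicate), it plausibly sums the bytes stored under the repository — a hypothetical sketch:

use anyhow::Result;
use vfs::{VfsFileType, VfsPath};

// Hypothetical shape of the assertion helper: total size in bytes of
// every file stored under the repository path, via a recursive walk.
pub fn data_weight(repository_path: &VfsPath) -> Result<u64> {
    let mut total = 0;
    for maybe_entry in repository_path.walk_dir()? {
        let entry = maybe_entry?;
        let metadata = entry.metadata()?;
        if metadata.file_type == VfsFileType::File {
            total += metadata.len;
        }
    }
    Ok(total)
}

If two backups of identical contents leave this number unchanged, the repository stored the data exactly once — which is what the proptest asserts.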
@@ -1,11 +1,9 @@
 #[cfg(test)]
 mod must {
-    use tempfile::tempdir;
-
     use anyhow::Result;
     use bakare::backup;
-    use bakare::repository::Repository;
-    use bakare::test::{assertions::*, source::TestSource};
+    use bakare::test::assertions::in_memory::*;
+    use bakare::{repository::Repository, test::source::TestSource};
 
     #[test]
     fn restore_multiple_files() -> Result<()> {
@@ -15,15 +13,17 @@ mod must {
         source.write_text_to_file("second", "some contents").unwrap();
         source.write_text_to_file("third", "some other contents").unwrap();
 
+        dbg!("setup done");
         assert_same_after_restore(source.path())
     }
 
     #[test]
     fn restore_files_after_reopening_repository() -> Result<()> {
         let source = TestSource::new().unwrap();
-        let repository_path = &tempdir().unwrap().into_path();
-        let restore_target = tempdir().unwrap().into_path();
-        Repository::init(repository_path)?;
+        let repository_path = random_in_memory_path("repository")?;
+        let restore_target = random_in_memory_path("target")?;
+        Repository::init(&repository_path)?;
 
         let source_file_relative_path = "some file path";
         let original_contents = "some old contents";
@@ -32,18 +32,18 @@ mod must {
 
         restore_all_from_reloaded_repository(&repository_path, &restore_target)?;
 
-        let source_file_full_path = &source.file_path(source_file_relative_path);
-        assert_restored_file_contents(repository_path, source_file_full_path, original_contents.as_bytes())
+        let source_file_full_path = &source.file_path(source_file_relative_path)?;
+        assert_restored_file_contents(&repository_path, source_file_full_path, original_contents.as_bytes())
     }
 
     #[test]
     fn restore_older_version_of_file() -> Result<()> {
         let source = TestSource::new().unwrap();
-        let repository_path = tempdir().unwrap().into_path();
-        Repository::init(repository_path.as_path())?;
+        let repository_path = random_in_memory_path("repository")?;
+        Repository::init(&repository_path)?;
 
         let source_file_relative_path = "some path";
-        let source_file_full_path = source.file_path(source_file_relative_path);
+        let source_file_full_path = source.file_path(source_file_relative_path)?;
         let old_contents = "some old contents";
 
         backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, old_contents)?;
@@ -60,11 +60,11 @@ mod must {
     #[test]
     fn newer_version_should_be_greater_than_earlier_version() -> Result<()> {
         let source = TestSource::new().unwrap();
-        let repository_path = tempdir().unwrap().into_path();
-        Repository::init(repository_path.as_path())?;
+        let repository_path = random_in_memory_path("repository")?;
+        Repository::init(&repository_path)?;
 
         let source_file_relative_path = "some path";
-        let source_file_full_path = source.file_path(source_file_relative_path);
+        let source_file_full_path = source.file_path(source_file_relative_path)?;
 
         backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "old")?;
 
@@ -84,24 +84,25 @@ mod must {
     #[test]
     fn restore_latest_version_by_default() -> Result<()> {
         let source = TestSource::new().unwrap();
-        let repository_path = &tempdir().unwrap().into_path();
-        Repository::init(repository_path)?;
+        let repository_path = random_in_memory_path("repository")?;
+        Repository::init(&repository_path)?;
 
         let source_file_relative_path = "some path";
         backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "old contents")?;
         backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "newer contents")?;
         backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "newest contents")?;
 
-        let source_file_full_path = &source.file_path(source_file_relative_path);
-        assert_restored_file_contents(repository_path, source_file_full_path, b"newest contents")
+        let source_file_full_path = &source.file_path(source_file_relative_path)?;
+        assert_restored_file_contents(&repository_path, source_file_full_path, b"newest contents")
     }
 
     #[test]
     fn forbid_backup_of_paths_within_repository() -> Result<()> {
-        let repository_path = &tempdir().unwrap().into_path();
-        Repository::init(repository_path)?;
-        let mut repository = Repository::open(repository_path)?;
-        let error = backup::Engine::new(repository_path, &mut repository);
+        let repository_path = random_in_memory_path("repository")?;
+        Repository::init(&repository_path)?;
+        let mut repository = Repository::open(&repository_path)?;
+
+        let error = backup::Engine::new(&repository_path, &mut repository);
         assert!(error.is_err());
         Ok(())
     }
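Condensed, the flow these tests drive end to end looks like the following sketch. The path names and file contents are illustrative, and the `backup()`/`restore_all()` calls assume the engines' entry points used above:

use std::io::Write;

use anyhow::Result;
use bakare::repository::Repository;
use bakare::{backup, restore};
use vfs::{MemoryFS, VfsPath};

fn roundtrip() -> Result<()> {
    // three sibling directories on one in-memory filesystem
    let root: VfsPath = MemoryFS::new().into();
    let source = root.join("source")?;
    let repository_path = root.join("repository")?;
    let target = root.join("target")?;
    source.create_dir_all()?;
    target.create_dir_all()?;
    source.join("file")?.create_file()?.write_all(b"contents")?;

    // back the source up into a freshly initialised repository
    Repository::init(&repository_path)?;
    let mut repository = Repository::open(&repository_path)?;
    backup::Engine::new(&source, &mut repository)?.backup()?;

    // reopen and restore, as restore_files_after_reopening_repository does
    let mut repository = Repository::open(&repository_path)?;
    restore::Engine::new(&mut repository, &target)?.restore_all()?;
    Ok(())
}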