#[cfg(test)]
mod must {
    use anyhow::Result;
    use bakare::test::source::TestSource;
    use bakare::{backup, restore};
    use bakare::{repository::Repository, test::assertions::in_memory::*};
    use nix::unistd::{fork, ForkResult};
    use nix::{
        sys::wait::{waitpid, WaitStatus},
        unistd::getpid,
    };
    use vfs::{PhysicalFS, VfsPath};
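
    // End-to-end concurrency test: fork a number of child processes that each back up
    // their own set of files into one shared repository, then restore everything in the
    // parent process and verify that every file written by every child is intact.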
    #[test]
    fn handle_concurrent_backups() -> Result<()> {
        setup_logger();

        let repository_directory = tempfile::tempdir()?.into_path();
        let repository_path: VfsPath = PhysicalFS::new(repository_directory).into();
        let repository_path = repository_path.join(&format!("repository-{}", getpid()))?;
        Repository::init(&repository_path)?;

        let parallel_backups_number = 16;
        let files_per_backup_number = 16;
        let total_number_of_files = parallel_backups_number * files_per_backup_number;

        let finished_backup_runs = backup_in_parallel(&repository_path, parallel_backups_number, files_per_backup_number)?;
        assert_eq!(finished_backup_runs.len(), parallel_backups_number);
        assert!(data_weight(&repository_path)? > 0);

        let target_path: VfsPath = random_in_memory_path("target")?;
        let all_restored_files = restore_all(&repository_path, &target_path)?;
        assert_eq!(all_restored_files.len(), total_number_of_files);

        assert_all_files_in_place(parallel_backups_number, files_per_backup_number, &all_restored_files)?;
        Ok(())
    }
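
    // For every (backup run, file index) pair, find the restored file named by
    // `file_id` and check that it exists and contains its own id as text.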
    fn assert_all_files_in_place(
        parallel_backups_number: usize,
        files_per_backup_number: usize,
        all_restored_files: &[VfsPath],
    ) -> Result<()> {
        for i in 0..parallel_backups_number {
            for j in 0..files_per_backup_number {
                let id = file_id(i, j);
                let file = all_restored_files.iter().find(|f| f.filename() == id);
                assert!(file.unwrap().exists(), "file {:?} does not exist", file);
                let contents = file.unwrap().read_to_string()?;
                assert_eq!(id.to_string(), contents.to_owned());
            }
        }
        Ok(())
    }
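
    // Fork one child process per backup run; each child executes `backup_process` and
    // exits, while the parent collects the child PIDs and waits for all of them,
    // failing the test if any child exits non-zero or is killed by a signal.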
    fn backup_in_parallel(
        repository_path: &VfsPath,
        parallel_backups_number: usize,
        files_per_backup_number: usize,
    ) -> Result<Vec<usize>> {
        let task_numbers = (0..parallel_backups_number).collect::<Vec<_>>();
        let mut child_pids = vec![];
        for task_number in &task_numbers {
            match unsafe { fork() } {
                Ok(ForkResult::Parent { child }) => {
                    child_pids.push(child);
                }
                Ok(ForkResult::Child) => {
                    backup_process(*task_number, repository_path, files_per_backup_number)?;
                    std::process::exit(0);
                }
                Err(_) => panic!("fork failed"),
            }
        }
        for pid in child_pids {
            let status = waitpid(Some(pid), None)?;
            match status {
                WaitStatus::Exited(pid, code) => {
                    assert!(code == 0, "failed the wait for {} with code {}", pid, code);
                }
                WaitStatus::Signaled(pid, _, _) => panic!("failed with signal for {}", pid),
                _ => panic!("unknown state"),
            }
        }
        Ok(task_numbers)
    }
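
    // Runs inside each forked child: open the shared repository, write this task's
    // files into a fresh `TestSource`, and back them up.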
    fn backup_process(task_number: usize, repository_path: &VfsPath, files_per_backup_number: usize) -> Result<()> {
        let mut repository = Repository::open(repository_path)?;
        let source = TestSource::new().unwrap();
        let mut backup_engine = backup::Engine::new(source.path(), &mut repository)?;
        for i in 0..files_per_backup_number {
            let id = file_id(task_number, i);
            source.write_text_to_file(&id, &id).unwrap();
        }
        backup_engine.backup()?;
        Ok(())
    }
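
    // Restore the whole repository into `restore_target` and return the restored
    // file paths, collected recursively and sorted.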
    fn restore_all(repository_path: &VfsPath, restore_target: &VfsPath) -> Result<Vec<VfsPath>> {
        let mut restore_repository = Repository::open(repository_path)?;
        let mut restore_engine = restore::Engine::new(&mut restore_repository, &restore_target)?;
        restore_engine.restore_all()?;
        get_sorted_files_recursively(&restore_target)
    }

    fn setup_logger() {
        femme::with_level(log::LevelFilter::Info);
    }

    fn file_id(i: usize, j: usize) -> String {
        format!("{}-{}", i, j)
    }
}