2020-11-28 14:29:23 +00:00
|
|
|
#[cfg(test)]
|
|
|
|
mod must {
|
2021-05-16 19:15:07 +01:00
|
|
|
use std::{
|
|
|
|
fs,
|
|
|
|
path::{Path, PathBuf},
|
|
|
|
};
|
|
|
|
|
2020-11-28 14:29:23 +00:00
|
|
|
use anyhow::Result;
|
2020-12-25 21:52:40 +00:00
|
|
|
use bakare::test::source::TestSource;
|
2020-11-28 14:29:23 +00:00
|
|
|
use bakare::{backup, restore};
|
2020-12-25 21:52:40 +00:00
|
|
|
use bakare::{repository::Repository, test::assertions::in_memory::*};
|
2020-11-28 14:29:23 +00:00
|
|
|
use nix::unistd::{fork, ForkResult};
|
2020-12-25 21:52:40 +00:00
|
|
|
use nix::{
|
|
|
|
sys::wait::{waitpid, WaitStatus},
|
|
|
|
unistd::getpid,
|
|
|
|
};
|
2021-05-16 19:15:07 +01:00
|
|
|
use tempfile::tempdir;
|
2020-12-25 21:52:40 +00:00
|
|
|
|
2020-11-28 14:29:23 +00:00
|
|
|
/// End-to-end concurrency test: run many backup processes against a single
/// repository in parallel, then restore everything and verify every file
/// made it through intact.
#[test]
fn handle_concurrent_backups() -> Result<()> {
    setup_logger();

    // Key the repository path by pid so parallel invocations of the test
    // binary cannot collide inside the shared temp directory.
    let dir = tempdir()?;
    let repository_path = dir.path().join(format!("repository-{}", getpid()));
    Repository::init(&repository_path)?;

    let parallel_backups_number = 16;
    let files_per_backup_number = 16;
    let total_number_of_files = parallel_backups_number * files_per_backup_number;

    // Fork one child per backup task; each child writes its own batch of files.
    let finished_backup_runs = backup_in_parallel(&repository_path, parallel_backups_number, files_per_backup_number)?;
    assert_eq!(finished_backup_runs.len(), parallel_backups_number);
    // The repository must actually contain data after the backups.
    assert!(data_weight(&repository_path)? > 0);

    // Restore the whole repository and check nothing was lost.
    let target_path = tempdir()?;
    let all_restored_files = restore_all(&repository_path, target_path.path())?;
    assert_eq!(all_restored_files.len(), total_number_of_files);

    // Finally verify each individual file's presence and contents.
    assert_all_files_in_place(parallel_backups_number, files_per_backup_number, &all_restored_files)?;
    Ok(())
}
|
|
|
|
|
|
|
|
fn assert_all_files_in_place(
|
|
|
|
parallel_backups_number: usize,
|
|
|
|
files_per_backup_number: usize,
|
2021-05-16 19:15:07 +01:00
|
|
|
all_restored_files: &[PathBuf],
|
2020-12-25 21:52:40 +00:00
|
|
|
) -> Result<()> {
|
2020-11-28 14:29:23 +00:00
|
|
|
for i in 0..parallel_backups_number {
|
|
|
|
for j in 0..files_per_backup_number {
|
|
|
|
let id = file_id(i, j);
|
2021-05-16 19:15:07 +01:00
|
|
|
let file = all_restored_files
|
|
|
|
.iter()
|
|
|
|
.find(|f| f.file_name().unwrap().to_string_lossy() == id);
|
|
|
|
assert!(file.unwrap().exists(), "file {:?} does not exist", file);
|
|
|
|
let contents = fs::read_to_string(file.unwrap())?;
|
2020-11-28 14:29:23 +00:00
|
|
|
assert_eq!(id.to_string(), contents.to_owned());
|
|
|
|
}
|
2020-11-08 14:27:26 +00:00
|
|
|
}
|
2020-11-28 14:29:23 +00:00
|
|
|
Ok(())
|
2020-11-08 14:27:26 +00:00
|
|
|
}
|
|
|
|
|
2020-12-25 21:52:40 +00:00
|
|
|
fn backup_in_parallel(
|
2021-05-16 19:15:07 +01:00
|
|
|
repository_path: &Path,
|
2020-11-28 14:29:23 +00:00
|
|
|
parallel_backups_number: usize,
|
|
|
|
files_per_backup_number: usize,
|
2020-12-25 21:52:40 +00:00
|
|
|
) -> Result<Vec<usize>> {
|
2020-11-28 14:29:23 +00:00
|
|
|
let task_numbers = (0..parallel_backups_number).collect::<Vec<_>>();
|
|
|
|
let mut child_pids = vec![];
|
|
|
|
for task_number in &task_numbers {
|
|
|
|
match unsafe { fork() } {
|
|
|
|
Ok(ForkResult::Parent { child }) => {
|
|
|
|
child_pids.push(child);
|
|
|
|
}
|
|
|
|
Ok(ForkResult::Child) => {
|
2021-10-22 21:20:53 +01:00
|
|
|
backup_process(*task_number, repository_path, files_per_backup_number)?;
|
2020-11-28 14:29:23 +00:00
|
|
|
std::process::exit(0);
|
|
|
|
}
|
2020-11-08 20:08:27 +00:00
|
|
|
|
2020-11-28 14:29:23 +00:00
|
|
|
Err(_) => panic!("fork failed"),
|
|
|
|
}
|
2020-11-08 14:27:26 +00:00
|
|
|
}
|
2020-11-28 14:29:23 +00:00
|
|
|
for pid in child_pids {
|
|
|
|
let status = waitpid(Some(pid), None)?;
|
|
|
|
match status {
|
|
|
|
WaitStatus::Exited(pid, code) => {
|
|
|
|
assert!(code == 0, "failed the wait for {} with code {}", pid, code);
|
|
|
|
}
|
|
|
|
WaitStatus::Signaled(pid, _, _) => panic!("failed with signal for {}", pid),
|
|
|
|
_ => panic!("unknown state"),
|
2020-11-08 14:27:26 +00:00
|
|
|
}
|
|
|
|
}
|
2020-11-28 14:29:23 +00:00
|
|
|
Ok(task_numbers)
|
2020-11-08 20:08:27 +00:00
|
|
|
}
|
2020-11-08 14:27:26 +00:00
|
|
|
|
2021-05-16 19:15:07 +01:00
|
|
|
fn backup_process(task_number: usize, repository_path: &Path, files_per_backup_number: usize) -> Result<()> {
|
2020-12-25 21:52:40 +00:00
|
|
|
let mut repository = Repository::open(repository_path)?;
|
2020-11-28 14:29:23 +00:00
|
|
|
let source = TestSource::new().unwrap();
|
|
|
|
let mut backup_engine = backup::Engine::new(source.path(), &mut repository)?;
|
|
|
|
for i in 0..files_per_backup_number {
|
|
|
|
let id = file_id(task_number, i);
|
|
|
|
source.write_text_to_file(&id, &id).unwrap();
|
|
|
|
}
|
|
|
|
backup_engine.backup()?;
|
|
|
|
Ok(())
|
2020-11-08 14:27:26 +00:00
|
|
|
}
|
|
|
|
|
2021-05-16 19:15:07 +01:00
|
|
|
fn restore_all(repository_path: &Path, restore_target: &Path) -> Result<Vec<PathBuf>> {
|
2020-12-25 21:52:40 +00:00
|
|
|
let mut restore_repository = Repository::open(repository_path)?;
|
2021-10-22 21:20:53 +01:00
|
|
|
let mut restore_engine = restore::Engine::new(&mut restore_repository, restore_target)?;
|
2020-11-28 14:29:23 +00:00
|
|
|
restore_engine.restore_all()?;
|
2021-10-22 21:20:53 +01:00
|
|
|
get_sorted_files_recursively(restore_target)
|
2020-11-28 14:29:23 +00:00
|
|
|
}
|
2020-11-08 14:27:26 +00:00
|
|
|
|
2020-11-28 14:29:23 +00:00
|
|
|
/// Initialise the femme logger at INFO level so test runs emit progress.
fn setup_logger() {
    femme::with_level(log::LevelFilter::Info);
}
|
2020-11-08 14:27:26 +00:00
|
|
|
|
2020-11-28 14:29:23 +00:00
|
|
|
/// Build the canonical name (and contents) for file `j` of backup task `i`,
/// e.g. task 3, file 14 -> "3-14".
fn file_id(i: usize, j: usize) -> String {
    let mut id = i.to_string();
    id.push('-');
    id.push_str(&j.to_string());
    id
}
|
2020-11-08 14:27:26 +00:00
|
|
|
}
|