add wrapping modules in integration tests
parent 5728645add
commit 4266f48b03
3 changed files with 218 additions and 211 deletions
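The same change is applied to each of the three integration-test files: their top-level items move into a `#[cfg(test)] mod must { ... }` wrapper module, presumably so that each test name reads as a sentence in the runner output (e.g. `must::handle_concurrent_backups`). A minimal sketch of the wrapper shape:

// Shape of the change in every file below: the existing imports,
// tests and helpers move inside a wrapper module, unchanged apart
// from one extra level of indentation.
#[cfg(test)]
mod must {
    // ...existing tests and helpers...
}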
@@ -1,104 +1,106 @@
#[cfg(test)]
mod must {
    use std::fs;
    use std::path::Path;

    use anyhow::Result;
    use bakare::repository::Repository;
    use bakare::test::{assertions::*, source::TestSource};
    use bakare::{backup, restore};
    use nix::sys::wait::{waitpid, WaitStatus};
    use nix::unistd::{fork, ForkResult};
    use tempfile::tempdir;

    #[test]
    fn handle_concurrent_backups() -> Result<()> {
        setup_logger();
        let repository_path = &tempdir().unwrap().into_path();
        Repository::init(repository_path)?;

        let parallel_backups_number = 16;
        let files_per_backup_number = 16;
        let total_number_of_files = parallel_backups_number * files_per_backup_number;
        let finished_backup_runs = backup_in_parallel(repository_path, parallel_backups_number, files_per_backup_number)?;
        assert_eq!(finished_backup_runs.len(), parallel_backups_number);

        let all_restored_files = restore_all(repository_path)?;
        assert_eq!(all_restored_files.len(), total_number_of_files);

        for i in 0..parallel_backups_number {
            for j in 0..files_per_backup_number {
                let id = file_id(i, j);
                let file = all_restored_files.iter().find(|f| f.ends_with(id.clone()));
                assert!(file.unwrap().exists(), "file {:?} does not exist", file);
                let contents = fs::read_to_string(file.unwrap()).unwrap();
                assert_eq!(id.to_string(), contents.to_owned());
            }
        }
        Ok(())
    }

    fn backup_in_parallel<T>(
        repository_path: T,
        parallel_backups_number: usize,
        files_per_backup_number: usize,
    ) -> Result<Vec<usize>>
    where
        T: AsRef<Path> + Sync,
    {
        let task_numbers = (0..parallel_backups_number).collect::<Vec<_>>();
        let mut child_pids = vec![];
        for task_number in &task_numbers {
            match unsafe { fork() } {
                Ok(ForkResult::Parent { child }) => {
                    child_pids.push(child);
                }
                Ok(ForkResult::Child) => {
                    backup_process(*task_number, &repository_path, files_per_backup_number)?;
                    std::process::exit(0);
                }
                Err(_) => panic!("fork failed"),
            }
        }
        for pid in child_pids {
            let status = waitpid(Some(pid), None)?;
            match status {
                WaitStatus::Exited(pid, code) => {
                    assert!(code == 0, "failed the wait for {} with code {}", pid, code);
                }
                WaitStatus::Signaled(pid, _, _) => panic!("failed with signal for {}", pid),
                _ => panic!("unknown state"),
            }
        }
        Ok(task_numbers)
    }

    fn backup_process<T>(task_number: usize, repository_path: T, files_per_backup_number: usize) -> Result<()>
    where
        T: AsRef<Path> + Sync,
    {
        let mut repository = Repository::open(repository_path.as_ref())?;
        let source = TestSource::new().unwrap();
        let mut backup_engine = backup::Engine::new(source.path(), &mut repository)?;
        for i in 0..files_per_backup_number {
            let id = file_id(task_number, i);
            source.write_text_to_file(&id, &id).unwrap();
        }
        backup_engine.backup()?;
        Ok(())
    }

    fn restore_all<T: AsRef<Path>>(repository_path: T) -> Result<Vec<Box<Path>>> {
        let restore_target = tempdir().unwrap().into_path();
        let mut restore_repository = Repository::open(repository_path.as_ref())?;
        let mut restore_engine = restore::Engine::new(&mut restore_repository, restore_target.as_ref())?;
        restore_engine.restore_all()?;
        get_sorted_files_recursively(&restore_target)
    }

    fn setup_logger() {
        femme::with_level(log::LevelFilter::Info);
    }

    fn file_id(i: usize, j: usize) -> String {
        format!("{}-{}", i, j)
    }
}
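The concurrency test above uses the nix crate's process primitives rather than threads, so each backup run gets a fully isolated process and the parent can assert on exit codes. A minimal, self-contained sketch of that fork/waitpid pattern (the `run_in_child` helper is hypothetical, for illustration only):

// Sketch of the fork/waitpid pattern used above, independent of bakare.
use nix::sys::wait::{waitpid, WaitStatus};
use nix::unistd::{fork, ForkResult};

fn run_in_child<F: FnOnce()>(work: F) {
    match unsafe { fork() } {
        Ok(ForkResult::Parent { child }) => {
            // The parent blocks until the child exits, then checks its exit code.
            match waitpid(Some(child), None) {
                Ok(WaitStatus::Exited(_, 0)) => {}
                other => panic!("child did not exit cleanly: {:?}", other),
            }
        }
        Ok(ForkResult::Child) => {
            // The child does its work and must exit explicitly, so it never
            // falls through into the parent's remaining test code.
            work();
            std::process::exit(0);
        }
        Err(e) => panic!("fork failed: {}", e),
    }
}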
@@ -1,27 +1,29 @@
#[cfg(test)]
mod must {
    use tempfile::tempdir;

    use bakare::repository::Repository;
    use bakare::test::{assertions::*, source::TestSource};

    use proptest::prelude::*;

    proptest! {
        #[test]
        fn store_duplicated_files_just_once(contents in any::<[u8;3]>()) {
            let source = TestSource::new().unwrap();
            let repository_path = &tempdir().unwrap().into_path();
            Repository::init(repository_path).unwrap();
            assert_eq!(data_weight(&repository_path).unwrap(), 0);

            backup_file_with_byte_contents(&source, &repository_path, "1", &contents).unwrap();
            let first_weight = data_weight(&repository_path).unwrap();
            assert!(first_weight > 0);

            backup_file_with_byte_contents(&source, &repository_path, "2", &contents).unwrap();
            let second_weight = data_weight(&repository_path).unwrap();
            assert_eq!(first_weight, second_weight);

            assert_restored_file_contents(repository_path, &source.file_path("1"), &contents).unwrap();
            assert_restored_file_contents(repository_path, &source.file_path("2"), &contents).unwrap();
        }
    }
}
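The deduplication test above is a proptest property: the macro generates arbitrary 3-byte arrays and reruns the body for each generated case, shrinking any failure toward a minimal input. A standalone sketch of the same structure, with a hypothetical property unrelated to bakare:

// Sketch of the proptest! structure used above.
use proptest::prelude::*;

proptest! {
    #[test]
    fn concatenation_preserves_length(a in any::<Vec<u8>>(), b in any::<Vec<u8>>()) {
        // The body runs once per generated (a, b) pair;
        // prop_assert_eq! reports the failing inputs on mismatch.
        let joined = [a.clone(), b.clone()].concat();
        prop_assert_eq!(joined.len(), a.len() + b.len());
    }
}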
@@ -1,108 +1,111 @@
#[cfg(test)]
mod must {
    use tempfile::tempdir;

    use anyhow::Result;
    use bakare::backup;
    use bakare::repository::Repository;
    use bakare::test::{assertions::*, source::TestSource};

    #[test]
    fn restore_multiple_files() -> Result<()> {
        let source = TestSource::new().unwrap();

        source.write_text_to_file("first", "some contents").unwrap();
        source.write_text_to_file("second", "some contents").unwrap();
        source.write_text_to_file("third", "some other contents").unwrap();

        assert_same_after_restore(source.path())
    }

    #[test]
    fn restore_files_after_reopening_repository() -> Result<()> {
        let source = TestSource::new().unwrap();
        let repository_path = &tempdir().unwrap().into_path();
        let restore_target = tempdir().unwrap().into_path();
        Repository::init(repository_path)?;

        let source_file_relative_path = "some file path";
        let original_contents = "some old contents";

        backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, original_contents)?;

        restore_all_from_reloaded_repository(&repository_path, &restore_target)?;

        let source_file_full_path = &source.file_path(source_file_relative_path);
        assert_restored_file_contents(repository_path, source_file_full_path, original_contents.as_bytes())
    }

    #[test]
    fn restore_older_version_of_file() -> Result<()> {
        let source = TestSource::new().unwrap();
        let repository_path = tempdir().unwrap().into_path();
        Repository::init(repository_path.as_path())?;

        let source_file_relative_path = "some path";
        let source_file_full_path = source.file_path(source_file_relative_path);
        let old_contents = "some old contents";

        backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, old_contents)?;

        let old_item = newest_item(&repository_path, &source_file_full_path)?;
        let old_id = old_item.id();

        let new_contents = "totally new contents";
        backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, new_contents)?;

        assert_restored_from_version_has_contents(&repository_path, &source_file_full_path, old_contents.as_bytes(), &old_id)
    }

    #[test]
    fn newer_version_should_be_greater_than_earlier_version() -> Result<()> {
        let source = TestSource::new().unwrap();
        let repository_path = tempdir().unwrap().into_path();
        Repository::init(repository_path.as_path())?;

        let source_file_relative_path = "some path";
        let source_file_full_path = source.file_path(source_file_relative_path);

        backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "old")?;

        let old_item = newest_item(&repository_path, &source_file_full_path)?;
        let old_version = old_item.version();

        backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "new")?;

        let new_item = newest_item(&repository_path, &source_file_full_path)?;
        let new_version = new_item.version();

        assert!(new_version > old_version);

        Ok(())
    }

    #[test]
    fn restore_latest_version_by_default() -> Result<()> {
        let source = TestSource::new().unwrap();
        let repository_path = &tempdir().unwrap().into_path();
        Repository::init(repository_path)?;

        let source_file_relative_path = "some path";
        backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "old contents")?;
        backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "newer contents")?;
        backup_file_with_text_contents(&source, &repository_path, source_file_relative_path, "newest contents")?;

        let source_file_full_path = &source.file_path(source_file_relative_path);
        assert_restored_file_contents(repository_path, source_file_full_path, b"newest contents")
    }

    #[test]
    fn forbid_backup_of_paths_within_repository() -> Result<()> {
        let repository_path = &tempdir().unwrap().into_path();
        Repository::init(repository_path)?;
        let mut repository = Repository::open(repository_path)?;
        let error = backup::Engine::new(repository_path, &mut repository);
        assert!(error.is_err());
        Ok(())
    }

    // TODO: index corruption
    // TODO: encryption
    // TODO: resume from sleep while backup in progress
}
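These tests all return `anyhow::Result<()>`, so `?` replaces chains of `unwrap` and a returned `Err` fails the test with its error printed. This works because Rust's test harness accepts any return type implementing `Termination`, which `Result<(), E: Debug>` does. A minimal sketch of the pattern, independent of bakare:

// Sketch of the fallible-test pattern used throughout these files.
use anyhow::{ensure, Result};

#[test]
fn fallible_test_sketch() -> Result<()> {
    // Any Err propagated by `?` fails the test.
    let parsed: i32 = "42".parse()?;
    // ensure! returns an Err with the given message if the condition is false.
    ensure!(parsed > 0, "expected a positive number, got {}", parsed);
    Ok(())
}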