Compare commits

..

2 Commits

Author SHA1 Message Date
grimhilt
1df9c3fba5 test(status): basics tests 2024-09-08 00:18:48 +02:00
grimhilt
250018c4bf feat(indexer+status): make some basics case work 2024-09-08 00:18:33 +02:00
14 changed files with 701 additions and 78 deletions

View File

@ -24,9 +24,7 @@ pub fn exec(args: AddArgs, config: Config) {
// Init indexer
let mut indexer = Indexer::new(config.get_root_unsafe());
dbg!(indexer.save());
dbg!(indexer.load());
return;
let _ = indexer.load();
nsobject::init(config.get_root_unsafe());
@ -40,6 +38,7 @@ pub fn exec(args: AddArgs, config: Config) {
if path_to_add.exists() {
if path_to_add.is_dir() {
indexer.index_dir(path_to_add.clone());
add_dir(&path_to_add, &mut ignorer, &mut indexer);
} else {
indexer.index_file(path_to_add);
@ -53,6 +52,8 @@ pub fn exec(args: AddArgs, config: Config) {
panic!("fatal: pathspec '{}' did not match any files", obj_to_add);
}
}
dbg!(indexer.save());
/*
for all files
if globbing

View File

@ -1,12 +1,14 @@
use crate::config::config::Config;
use crate::store::{
indexer::Indexer,
nsobject::NsObject,
object::{Obj, ObjStatus},
object::{Obj, ObjStatus, ObjType},
};
use crate::utils::path;
use colored::{ColoredString, Colorize};
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use threadpool::ThreadPool;
@ -14,10 +16,19 @@ pub struct StatusArgs {
pub nostyle: bool,
}
type HashMapObj = HashMap<String, Obj>;
type HashMapObjStatuses = Arc<Mutex<HashMapObj>>;
struct ObjStatuses {
created: Arc<Mutex<HashMap<String, Obj>>>,
modified: Arc<Mutex<HashMap<String, Obj>>>,
deleted: Arc<Mutex<HashMap<String, Obj>>>,
created: HashMapObjStatuses,
modified: HashMapObjStatuses,
deleted: HashMapObjStatuses,
// staged: Arc<Mutex<Vec<Obj>>>,
// not_staged: Arc<Mutex<Vec<Obj>>>,
// untracked: Arc<Mutex<Vec<Obj>>>,
staged: Vec<Obj>,
not_staged: Vec<Obj>,
untracked: Vec<Obj>,
}
impl ObjStatuses {
@ -26,6 +37,12 @@ impl ObjStatuses {
created: Arc::new(Mutex::new(HashMap::new())),
modified: Arc::new(Mutex::new(HashMap::new())),
deleted: Arc::new(Mutex::new(HashMap::new())),
// staged: Arc::new(Mutex::new(Vec::new())),
// not_staged: Arc::new(Mutex::new(Vec::new())),
// untracked: Arc::new(Mutex::new(Vec::new()))
staged: Vec::new(),
not_staged: Vec::new(),
untracked: Vec::new(),
}
}
@ -40,9 +57,57 @@ impl ObjStatuses {
/// Record `value` under `key` in the `deleted` bucket (thread-safe via the
/// interior mutex; callers may run from worker-pool threads).
fn push_deleted(&self, key: String, value: Obj) {
    self.deleted.lock().unwrap().insert(key, value);
}
/// Return a snapshot of the `created` bucket.
/// Note: clones the whole map while holding the lock — fine for status-sized
/// result sets, but worth revisiting if repositories get very large.
fn get_created(&self) -> HashMapObj {
    self.created.lock().unwrap().clone()
}
}
/// Detected changes partitioned by staging state: `staged` entries are in the
/// index, `not_staged` entries are not.
pub struct ObjStaged {
    pub staged: Vec<Obj>,
    pub not_staged: Vec<Obj>,
}
/// Split the detected changes into staged / not-staged buckets by checking
/// each object against the index.
///
/// Currently only `created` objects are handled.
// TODO: process `modified` and `deleted` buckets the same way.
fn setup_staged(obj_statuses: Arc<ObjStatuses>, indexer: &Indexer) -> ObjStaged {
    let mut result = ObjStaged {
        staged: Vec::new(),
        not_staged: Vec::new(),
    };
    for (_, created) in obj_statuses.get_created() {
        let mut obj = created;
        obj.set_status(ObjStatus::Created);
        // Route the object to the bucket matching its index membership.
        let bucket = if indexer.is_staged(&obj) {
            &mut result.staged
        } else {
            &mut result.not_staged
        };
        bucket.push(obj);
    }
    result
}
/// Entry point for `nextsync status`: collect the working-tree changes and
/// print them in a git-like format.
pub fn exec(args: StatusArgs, config: Config) {
    print_status(&get_obj_changes(&args, &config));
}
pub fn get_obj_changes(args: &StatusArgs, config: &Config) -> ObjStaged {
// use root of repo if no custom path has been set by the command
let root = if config.is_custom_execution_path {
config.execution_path.clone()
@ -50,14 +115,21 @@ pub fn exec(args: StatusArgs, config: Config) {
config.get_root_unsafe().to_path_buf()
};
let indexer = Arc::new({
let mut indexer = Indexer::new(config.get_root_unsafe());
indexer.load_unsafe();
indexer
});
let pool = ThreadPool::new(4);
let repo_root = config.get_root_unsafe().clone();
let res = Arc::new(ObjStatuses::new());
let pool_clone = pool.clone();
let res_clone = Arc::clone(&res);
let indexer_clone = Arc::clone(&indexer);
pool.execute(move || {
compare_dir(pool_clone, &repo_root, &root, res_clone);
compare_dir(pool_clone, indexer_clone, &repo_root, &root, res_clone);
});
pool.join();
@ -71,7 +143,62 @@ pub fn exec(args: StatusArgs, config: Config) {
for entry in res.deleted.lock().unwrap().iter() {
println!("deleted: {}", entry.0);
}
// find moved and copied
// find staged
setup_staged(Arc::clone(&res), &indexer)
}
/// Print a git-like summary of `objs` to stdout.
///
/// Staged entries are rendered in green, unstaged ones in red. Prints a
/// "clean tree" message and returns early when there is nothing to report.
fn print_status(objs: &ObjStaged) {
    if objs.staged.is_empty() && objs.not_staged.is_empty() {
        println!("Nothing to push, working tree clean");
        return;
    }
    if !objs.staged.is_empty() {
        println!("Changes to be pushed:");
        println!(" (Use \"nextsync reset\" to unstage)");
        for obj in objs.staged.iter() {
            print_object(obj, |status: &str| status.green());
        }
    }
    // Only print the section when there is something unstaged; the original
    // emitted an empty header whenever anything was staged.
    if !objs.not_staged.is_empty() {
        println!("Changes not staged for push:");
        println!(" (Use \"nextsync add <file>...\" to update what will be pushed)");
        for obj in objs.not_staged.iter() {
            print_object(obj, |status: &str| status.red());
        }
    }
}
/// Print one object as a single status line, e.g. ` new file: path`.
///
/// `color` is applied to both the status label and the path (green for
/// staged entries, red for unstaged ones).
fn print_object(obj: &Obj, color: impl Fn(&str) -> ColoredString) {
    let label = match obj.get_status() {
        ObjStatus::Created => {
            // Blobs (files) get "new file:", everything else just "new:".
            if obj.get_obj_type() == &ObjType::Blob {
                color("new file: ")
            } else {
                color("new: ")
            }
        }
        ObjStatus::Modified => color("modified: "),
        ObjStatus::Deleted => color("deleted: "),
        // Undefined / Moved / Copied are not rendered yet.
        _ => "unknown".red(),
    };
    // cpy_path() already returns an owned String; the previous
    // `.to_string()` cloned it a second time for nothing.
    println!(" {}{}", label, color(&obj.cpy_path()));
}
///
@ -80,7 +207,13 @@ pub fn exec(args: StatusArgs, config: Config) {
/// * `root_path`: path of the repo's root
/// * `path`: path we should analyze
/// * `res`: the struct in which we should store the response
fn compare_dir(pool: ThreadPool, root_path: &PathBuf, path: &PathBuf, res: Arc<ObjStatuses>) {
fn compare_dir(
pool: ThreadPool,
indexer: Arc<Indexer>,
root_path: &PathBuf,
path: &PathBuf,
res: Arc<ObjStatuses>,
) {
let entries = match fs::read_dir(path) {
Ok(entries) => entries,
Err(err) => {
@ -105,23 +238,36 @@ fn compare_dir(pool: ThreadPool, root_path: &PathBuf, path: &PathBuf, res: Arc<O
let repo_relative_entry = path::to_repo_relative(&entry.path(), root_path);
let local_obj = Obj::from_local_path(&repo_relative_entry);
if entry.path().is_dir() {
if entry.path().is_dir() {
if entry.path().ends_with(".nextsync") {
continue;
}
if local_obj.is_new() {
// TODO: opti move files in new directory
res.push_created(local_obj.cpy_path(), local_obj);
if indexer.is_staged_parent(&local_obj) {
let res_clone = Arc::clone(&res);
add_childs(root_path, &entry.path(), res_clone);
} else {
res.push_created(local_obj.cpy_path(), local_obj);
}
} else {
// can be modified
local_dirs.insert(local_obj.cpy_path(), local_obj);
// Analyze sub folder
let pool_clone = pool.clone();
let root_path_clone = root_path.clone();
let res_clone = Arc::clone(&res);
let indexer_clone = Arc::clone(&indexer);
pool.execute(move || {
compare_dir(pool_clone, &root_path_clone, &entry.path(), res_clone);
compare_dir(
pool_clone,
indexer_clone,
&root_path_clone,
&entry.path(),
res_clone,
);
});
}
} else {
@ -160,3 +306,36 @@ fn compare_dir(pool: ThreadPool, root_path: &PathBuf, path: &PathBuf, res: Arc<O
}
}
}
/// Recursively record every entry under `path` as a created object.
///
/// Used when a parent directory is already staged: its children are then
/// reported individually instead of reporting the directory as a whole.
fn add_childs(root_path: &PathBuf, path: &PathBuf, res: Arc<ObjStatuses>) {
    let entries = match fs::read_dir(path) {
        Ok(entries) => entries,
        Err(err) => {
            eprintln!("Failed to read {} ({err})", path.display());
            return;
        }
    };
    for dir_entry in entries {
        let dir_entry = match dir_entry {
            Ok(e) => e,
            Err(err) => {
                eprintln!("Failed to read entry {err}");
                continue;
            }
        };
        let entry_path = dir_entry.path();
        if entry_path.is_dir() {
            // Never index the repository's own metadata directory.
            if entry_path.ends_with(".nextsync") {
                continue;
            }
            // Depth-first: record the directory's children before the
            // directory itself (same ordering as before).
            add_childs(root_path, &entry_path, Arc::clone(&res));
        }
        let relative = path::to_repo_relative(&entry_path, root_path);
        let obj = Obj::from_local_path(&relative);
        res.push_created(obj.cpy_path(), obj);
    }
}

5
src/lib.rs Normal file
View File

@ -0,0 +1,5 @@
pub mod commands;
pub mod config;
pub mod store;
pub mod subcommands;
pub mod utils;

View File

@ -1,38 +1,40 @@
use crate::store::object::ObjType;
use crate::store::object::{Obj, ObjType};
use crate::utils::path::normalize_path;
use std::cmp::Ordering;
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::io::{self, Write};
use std::io::{BufRead, BufReader};
use std::path::PathBuf;
fn get_level(previous_level: u16, path: &PathBuf) -> (u16, IndexLevel) {
let mut level = 0;
let mut path = path.clone();
while path.pop() {
level += 1;
}
// Custom sorting function to handle paths hierarchically
fn sort_paths_hierarchically(paths: &mut Vec<PathBuf>) {
paths.sort_by(|a, b| {
// Split paths into components for comparison
let a_components: Vec<_> = a.components().collect();
let b_components: Vec<_> = b.components().collect();
let index_level = if previous_level < level {
IndexLevel::Down
} else {
IndexLevel::Up
};
// Compare components one by one
for (a_component, b_component) in a_components.iter().zip(b_components.iter()) {
match a_component.cmp(b_component) {
Ordering::Equal => continue,
ordering => return ordering,
}
}
(level, index_level)
}
enum IndexLevel {
Up,
Down,
// If one path is a prefix of the other, the shorter path comes first
a_components.len().cmp(&b_components.len())
});
}
#[derive(Debug)]
struct IndexedObj {
obj_type: ObjType,
level: IndexLevel,
path: PathBuf,
}
pub struct Indexer {
repo_root: PathBuf,
index_file: PathBuf,
previous_level: u16,
indexed_objs: Vec<IndexedObj>,
}
@ -43,47 +45,76 @@ impl Indexer {
index_file.push("index");
Indexer {
repo_root: repo_root.clone(),
index_file,
previous_level: 0,
indexed_objs: Vec::new(),
}
}
pub fn load(&self) -> io::Result<()> {
let mut file = File::open(&self.index_file)?;
let mut str = String::new();
pub fn load(&mut self) -> io::Result<()> {
let file = File::open(&self.index_file)?;
let reader = BufReader::new(file);
// Skip reserved bytes
let mut byte = [0; 1];
file.read_exact(&mut byte)?;
file.seek(SeekFrom::Start(1))?;
for line in reader.lines() {
let line = line?;
let line = line.as_bytes();
// Read usize value
let mut usize_bytes = [0; std::mem::size_of::<usize>()];
file.read_exact(&mut usize_bytes)?;
dbg!(ObjType::try_from(byte[0]));
let usize_value = usize::from_le_bytes(usize_bytes);
let path_str = String::from_utf8(line[1..].to_vec()).unwrap();
let path = PathBuf::from(path_str);
// Read PathBuf as string
let mut buffer = Vec::new();
file.read_to_end(&mut buffer)?;
let path_str = String::from_utf8(buffer).unwrap();
let path = PathBuf::from(path_str);
self.indexed_objs.push(IndexedObj {
obj_type: ObjType::try_from(line[0]).unwrap(),
path,
});
}
println!("usize value: {}", usize_value);
println!("Path: {:?}", path);
Ok(())
}
fn index_obj(&mut self, path: PathBuf, obj_type: ObjType) {
let (level, index_level) = get_level(self.previous_level, &path);
self.previous_level = level;
pub fn load_unsafe(&mut self) {
let _ = self.load();
dbg!(&self.indexed_objs);
}
self.indexed_objs.push(IndexedObj {
/// True when an entry with the same type and same repo-relative path is
/// already present in the in-memory index.
fn is_indexed(&self, obj: &IndexedObj) -> bool {
    // `any` expresses the membership test directly (no index needed).
    self.indexed_objs
        .iter()
        .any(|o| o.obj_type == obj.obj_type && o.path == obj.path)
}
/// True when the index contains an entry whose path equals `obj`'s
/// repo-relative path.
///
/// NOTE(review): only the path is compared — the object type is ignored,
/// unlike `is_indexed`; confirm this asymmetry is intended.
pub fn is_staged(&self, obj: &Obj) -> bool {
    // Debug `dbg!(obj)` removed: it spammed stderr on every status check.
    self.indexed_objs
        .iter()
        .any(|o| &o.path == obj.get_obj_path())
}
/// True when the index contains an entry located under `obj`'s path,
/// i.e. `obj` is a directory that contains (directly or transitively)
/// at least one staged entry.
pub fn is_staged_parent(&self, obj: &Obj) -> bool {
    self.indexed_objs
        .iter()
        .any(|o| o.path.starts_with(obj.get_obj_path()))
}
fn index_obj(&mut self, path: PathBuf, obj_type: ObjType) {
let mut path = path;
path = normalize_path(path); // normalize path (/foo/./bar => /foo/bar)
// change path to be relative to repo's root
path = path.strip_prefix(&self.repo_root).unwrap().to_path_buf();
let indexed_obj = IndexedObj {
obj_type,
level: index_level,
path: path.clone(),
});
};
if self.is_indexed(&indexed_obj) {
return;
}
self.indexed_objs.push(indexed_obj);
}
pub fn index_file(&mut self, path: PathBuf) {
@ -97,20 +128,15 @@ impl Indexer {
pub fn save(&self) -> io::Result<()> {
let mut file = File::create(&self.index_file)?;
// Write reserved bytes
let variant = ObjType::Blob;
let byte: u8 = variant.into();
file.write_all(&[byte])?;
for obj in self.indexed_objs.iter() {
// write obj_type
file.write_all(&[obj.obj_type.clone().into()])?;
// Write usize value
let usize_value: usize = 12;
let usize_bytes = usize_value.to_le_bytes();
file.write_all(&usize_bytes)?;
// write path
file.write_all(obj.path.to_str().unwrap().as_bytes())?;
// Write PathBuf as string
let path = PathBuf::from("/jodi/");
let path_str = path.to_str().unwrap();
file.write_all(path_str.as_bytes())?;
file.write_all(b"\n")?;
}
Ok(())
}

View File

@ -40,16 +40,17 @@ impl TryFrom<u8> for ObjType {
}
}
#[derive(PartialEq, Clone)]
#[derive(PartialEq, Clone, Debug)]
pub enum ObjStatus {
Undefined,
Created,
Modified,
Moved,
Copied,
Deleted,
}
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct Obj {
obj_type: ObjType,
status: OnceLock<ObjStatus>,
@ -67,15 +68,16 @@ impl Obj {
}
}
fn get_status(&self) -> &ObjStatus {
pub fn get_status(&self) -> &ObjStatus {
self.status.get_or_init(|| {
// read path
// todo!();
// TODO read path
ObjStatus::Created
})
}
pub fn set_status(&mut self, status: ObjStatus) {
todo!()
self.status = OnceLock::from(status);
}
pub fn set_type(&mut self, obj_type: ObjType) {
@ -86,6 +88,14 @@ impl Obj {
self.get_status() == &ObjStatus::Created
}
pub fn get_obj_type(&self) -> &ObjType {
&self.obj_type
}
pub fn get_obj_path(&self) -> &PathBuf {
&self.obj_path
}
pub fn cpy_path(&self) -> String {
path::to_string(&self.obj_path)
}

View File

@ -1,4 +1,4 @@
use std::path::{Path, PathBuf};
use std::path::{Path, PathBuf, Component};
pub fn to_repo_relative(path: &PathBuf, root: &PathBuf) -> PathBuf {
path.strip_prefix(root).unwrap().to_path_buf()
@ -7,3 +7,35 @@ pub fn to_repo_relative(path: &PathBuf, root: &PathBuf) -> PathBuf {
pub fn to_string(path: &PathBuf) -> String {
path.to_str().unwrap().to_string()
}
/// Normalize a path lexically, resolving `.` and `..` components without
/// touching the filesystem.
///
/// Adapted from https://stackoverflow.com/questions/68231306/stdfscanonicalize-for-files-that-dont-exist
///
/// This assumes `a/b/../c` is `a/c`, which may differ from what the OS
/// would resolve when `b` is a symlink; that purely lexical behavior is
/// acceptable here. A trailing `/` on the input is preserved on the
/// output, and leading `..` components that cannot be resolved are kept.
pub fn normalize_path<P: AsRef<Path>>(path: P) -> PathBuf {
    let path = path.as_ref();
    let keep_trailing_slash = path.to_str().map_or(false, |s| s.ends_with('/'));

    let mut out = PathBuf::new();
    for part in path.components() {
        if let Component::ParentDir = part {
            // Drop the previous component; when there is none, the `..`
            // cannot be resolved lexically and is kept as-is.
            if !out.pop() {
                out.push(part);
            }
        } else {
            out.push(part);
        }
    }

    if keep_trailing_slash {
        // Pushing an empty component re-adds the trailing separator.
        out.push("");
    }
    out
}

133
tests/common/client.rs Normal file
View File

@ -0,0 +1,133 @@
use nextsync::config::config::Config;
use rand::{distributions::Alphanumeric, Rng};
use std::env;
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::PathBuf;
use std::process::{Command, Output};
use std::str;
/// Integration-test helper that drives the compiled `nextsync` binary
/// against a throwaway working directory under /tmp/nextsync/.
pub struct ClientTest {
    volume: String,      // temp dir for the test
    pub test_id: String, // name of the test (e.g nextsync_rand)
    exe_path: PathBuf,   // absolute path of nextsync executable
}
/// Produce a 4-character alphanumeric suffix so that concurrent test runs
/// get distinct working directories.
pub fn get_random_test_id() -> String {
    let mut rng = rand::thread_rng();
    let mut id = String::with_capacity(4);
    for _ in 0..4 {
        id.push(char::from(rng.sample(Alphanumeric)));
    }
    id
}
impl ClientTest {
    /// Create a fresh test client: builds a unique id `<id>_<rand>`,
    /// (re)creates /tmp/nextsync/<id>_<rand> as the working directory and
    /// resolves the debug `nextsync` binary relative to the current dir.
    pub fn new(id: &str) -> Self {
        let mut test_id = id.to_string();
        test_id.push_str("_");
        test_id.push_str(&get_random_test_id());
        // create a directory in /tmp with the given id
        let mut vol = String::from("/tmp/nextsync/");
        vol.push_str(&test_id);
        // Best-effort cleanup of a previous run, then recreate.
        let _ = fs::remove_dir_all(&vol);
        let _ = fs::create_dir_all(&vol);
        // get nextsync path
        // NOTE(review): assumes tests run from the crate root so that
        // target/debug/nextsync exists — confirm for CI.
        let mut exe_path = env::current_dir().unwrap();
        exe_path = exe_path.join("target/debug/nextsync");
        // build the client
        ClientTest {
            volume: vol,
            test_id,
            exe_path,
        }
    }

    /// Run `nextsync init` in the volume; consumes and returns `self` so it
    /// can be chained after `new`. Remote/credential setup is currently
    /// disabled (kept below for when a test server is available).
    pub fn init(mut self) -> Self {
        self.exec_ok("init");
        println!("========== {} ========== ", &self.test_id);
        // set remote url
        // let url = String::from(format!("{}@nextcloud.local/{}", self.user, self.test_id));
        // self.run_cmd_ok(&format!("remote add origin {}", url));
        // // set force_unsecure as debug server has not certificate
        // self.run_cmd_ok("config set force_insecure true");
        // // set token for request
        // self.run_cmd_ok(&format!("credential add {} {}", self.user, self.user));
        self
    }

    /// Build a `Config` rooted at this test's working directory.
    pub fn get_config(&self) -> Config {
        Config::from(Some(&self.volume))
    }

    /// Finish the test successfully: delete the working directory.
    pub fn ok(self) -> io::Result<()> {
        fs::remove_dir_all(&self.volume)?;
        Ok(())
    }

    /// Run a nextsync command and assert it exited successfully, dumping
    /// stdout/stderr on failure to ease debugging.
    pub fn exec_ok(&mut self, args: &str) -> Output {
        let output = self.exec(args);
        if !output.status.success() {
            println!("id: {}", self.test_id.clone());
            println!("Failed to execute: '{}'", args);
            println!("stderr: {}", String::from_utf8_lossy(&output.stderr));
            println!("stdout: {}", String::from_utf8_lossy(&output.stdout));
        }
        assert!(output.status.success());
        output
    }

    /// Run a nextsync command inside the volume and return its raw output.
    /// Arguments are split on single spaces, so quoted/spaced arguments are
    /// not supported.
    pub fn exec(&mut self, args: &str) -> Output {
        let output = Command::new(self.exe_path.to_str().unwrap())
            .current_dir(self.volume.clone())
            .args(args.split(" "))
            .output()
            .expect("Could not execute nextsync command");
        return output;
    }

    /// Create a directory (and parents) named `name` inside the volume.
    pub fn add_dir(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        let _ = fs::create_dir_all(path)?;
        Ok(())
    }

    /// Create (or truncate) file `name` inside the volume with `content`.
    pub fn add_file(&mut self, name: &str, content: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        let mut file = File::create(path)?;
        file.write_all(content.as_bytes())?;
        Ok(())
    }

    /// Delete file `name` inside the volume.
    pub fn remove_file(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        fs::remove_file(path)?;
        Ok(())
    }

    /// Recursively delete directory `name` inside the volume.
    pub fn remove_dir(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        fs::remove_dir_all(path)?;
        Ok(())
    }

    // pub fn has_file(&mut self, file: &str, content: &str) -> bool {
    // let full_path = PathBuf::from(self.volume.clone()).join(file);
    // // has_files(full_path, file, content, self.test_id.clone())
    // }
}

2
tests/common/mod.rs Normal file
View File

@ -0,0 +1,2 @@
pub mod client;
pub mod utils;

0
tests/common/utils.rs Normal file
View File

1
tests/status/main.rs Normal file
View File

@ -0,0 +1 @@
mod status_test;

View File

@ -0,0 +1,14 @@
// #[cfg(test)]
// mod status_tests {
// #[test]
// fn status_test() {
// let client = ClientTest::new("").init();
// }
// // basic add
// // add all folder
// // add all folder current
// // ../folder/file add
// // add part of folder
// }

105
tests/status_test.rs Normal file
View File

@ -0,0 +1,105 @@
mod common;
use common::client::ClientTest;
use nextsync::commands::status::{get_obj_changes, StatusArgs};
use nextsync::config::config::Config;
use std::io;
use std::path::PathBuf;
/// Default arguments used by every status invocation in these tests.
const DEFAULT_STATUS_ARG: StatusArgs = StatusArgs { nostyle: false };

/// Assert that `nextsync status` reports exactly the given staged and
/// not-staged paths (order-independent, but counts must match exactly).
// NOTE(review): name has a typo ("exepected"); rename together with all
// call sites in a follow-up.
fn status_exepected(config: &Config, staged: Vec<&str>, not_staged: Vec<&str>) {
    let res = get_obj_changes(&DEFAULT_STATUS_ARG, config);
    assert_eq!(res.staged.len(), staged.len());
    assert_eq!(res.not_staged.len(), not_staged.len());
    for obj in staged {
        // `any` instead of `position(..).is_some()` — same check, clearer.
        assert!(res
            .staged
            .iter()
            .any(|e| e.get_obj_path() == &PathBuf::from(obj)));
    }
    for obj in not_staged {
        assert!(res
            .not_staged
            .iter()
            .any(|e| e.get_obj_path() == &PathBuf::from(obj)));
    }
}
/// A brand-new file appears as not staged, then moves to staged after
/// `nextsync add`.
#[test]
fn simple_file() -> io::Result<()> {
    let mut client = ClientTest::new("status__simple_file").init();
    client.add_file("foo", "foo")?;
    status_exepected(&client.get_config(), vec![], vec!["foo"]);
    client.exec_ok("add foo");
    status_exepected(&client.get_config(), vec!["foo"], vec![]);
    client.ok()
}
#[test]
#[ignore]
fn all_folder() -> io::Result<()> {
let mut client = ClientTest::new("status__all_folder").init();
client.add_dir("dir")?;
client.add_file("dir/foo", "foo")?;
client.add_file("dir/bar", "bar")?;
client.add_file("foo", "foo")?;
status_exepected(&client.get_config(), vec![], vec!["foo", "dir"]);
client.exec_ok("add dir");
status_exepected(&client.get_config(), vec!["dir"], vec!["foo"]);
client.ok()
}
/// Status run from inside a subdirectory should show paths relative to the
/// current directory. Ignored until relative-path reporting is implemented.
#[test]
#[ignore]
fn all_folder_current() -> io::Result<()> {
    let mut client = ClientTest::new("status__all_folder_current").init();
    client.add_dir("dir")?;
    client.add_file("dir/foo", "foo")?;
    client.add_file("dir/bar", "bar")?;
    client.add_file("foor", "foor")?;
    status_exepected(&client.get_config(), vec![], vec!["foor", "dir"]);
    client.exec_ok("add dir");
    // NOTE(review): "../foo" looks inconsistent — the file created above is
    // "foor", and the staged names "foor"/"bar" don't match "dir/foo"/"dir/bar"
    // semantics either; re-check these expectations before un-ignoring.
    status_exepected(
        &Config::from(Some(&String::from("./dir"))),
        vec!["foor", "bar"],
        vec!["../foo"],
    );
    client.ok()
}
/// Staging a single file inside a directory stages only that file; the
/// sibling file and unrelated top-level file remain unstaged.
#[test]
fn part_of_folder() -> io::Result<()> {
    let mut client = ClientTest::new("status__part_of_folder").init();
    client.add_dir("dir")?;
    client.add_file("dir/foo", "foo")?;
    client.add_file("dir/bar", "bar")?;
    client.add_file("foor", "foor")?;
    status_exepected(&client.get_config(), vec![], vec!["foor", "dir"]);
    client.exec_ok("add dir/foo");
    status_exepected(
        &client.get_config(),
        vec!["dir/foo"],
        vec!["foor", "dir/bar"],
    );
    client.ok()
}
// ../folder/file add

5
tests/tests.rs Normal file
View File

@ -0,0 +1,5 @@
#[cfg(test)]
mod tests {
// mod status;
}

110
tests/utils/client.rs Normal file
View File

@ -0,0 +1,110 @@
/// Legacy integration-test client (superseded by tests/common/client.rs).
///
/// Fix: the impl below initializes `user` in `new()` and reads `self.user`
/// in `init()`, but the struct never declared the field — a compile error.
#[cfg(test)]
pub struct ClientTest {
    user: String,        // nextcloud user for remote/credential setup
    volume: String,      // temp dir for the test
    pub test_id: String, // name of the test (e.g nextsync_rand)
    exe_path: PathBuf,   // absolute path of nextsync executable
}
#[cfg(test)]
impl ClientTest {
    /// Create a test client rooted at /tmp/<id>, pointing at the debug
    /// `nextsync` binary. The "admin" user is used for remote/credentials.
    // NOTE(review): `user` is initialized here — ensure the ClientTest
    // struct actually declares a `user` field, otherwise this won't compile.
    pub fn new(id: String) -> Self {
        // create a directory in /tmp with the given id
        let mut vol = String::from("/tmp/");
        vol.push_str(&id);
        // Best-effort: ignore failure if the directory already exists.
        let _ = fs::create_dir(vol.clone());
        // get nextsync path
        // NOTE(review): assumes tests run from the crate root so that
        // target/debug/nextsync exists — confirm for CI.
        let mut exe_path = env::current_dir().unwrap();
        exe_path = exe_path.join("target/debug/nextsync");
        // build the client
        ClientTest {
            user: String::from("admin"),
            volume: vol,
            test_id: id,
            exe_path
        }
    }

    /// Run `nextsync init`, then configure remote url, insecure transport
    /// (test server has no certificate) and credentials. Consumes and
    /// returns `self` for chaining.
    pub fn init(mut self) -> Self {
        self.run_cmd_ok("init");
        // set remote url
        let url = String::from(format!("{}@nextcloud.local/{}", self.user, self.test_id));
        self.run_cmd_ok(&format!("remote add origin {}", url));
        // set force_unsecure as debug server has not certificate
        self.run_cmd_ok("config set force_insecure true");
        // set token for request
        self.run_cmd_ok(&format!("credential add {} {}", self.user, self.user));
        self
    }

    /// Remove the working directory (errors ignored); returns `self`.
    pub fn clean(self) -> Self {
        let _ = fs::remove_dir_all(&self.volume);
        self
    }

    /// Run a nextsync command and assert success, dumping stdout/stderr on
    /// failure to ease debugging.
    pub fn run_cmd_ok(&mut self, args: &str) -> Output {
        let output = self.run_cmd(args);
        if !output.status.success() {
            println!("id: {}", self.test_id.clone());
            println!("Failed to execute: '{}'", args);
            println!("stderr: {}", String::from_utf8_lossy(&output.stderr));
            println!("stdout: {}", String::from_utf8_lossy(&output.stdout));
        }
        assert!(output.status.success());
        output
    }

    /// Run a nextsync command inside the volume and return its raw output.
    /// Arguments are split on single spaces, so quoted arguments are not
    /// supported.
    pub fn run_cmd(&mut self, args: &str) -> Output {
        let output = Command::new(self.exe_path.to_str().unwrap())
            .current_dir(self.volume.clone())
            .args(args.split(" "))
            .output()
            .expect("Could not execute nextsync command");
        return output;
    }

    /// Create a directory (and parents) named `name` inside the volume.
    pub fn add_dir(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        let _ = fs::create_dir_all(path)?;
        Ok(())
    }

    /// Create (or truncate) file `name` inside the volume with `content`.
    pub fn add_file(&mut self, name: &str, content: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        let mut file = File::create(path)?;
        file.write_all(content.as_bytes())?;
        Ok(())
    }

    /// Delete file `name` inside the volume.
    pub fn remove_file(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        fs::remove_file(path)?;
        Ok(())
    }

    /// Recursively delete directory `name` inside the volume.
    pub fn remove_dir(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        fs::remove_dir_all(path)?;
        Ok(())
    }

    /// Check the volume contains `file` with `content`.
    // NOTE(review): `has_files` is not defined in this file as shown —
    // confirm it exists elsewhere or this method won't compile.
    pub fn has_file(&mut self, file: &str, content: &str) -> bool {
        let full_path = PathBuf::from(self.volume.clone()).join(file);
        has_files(full_path, file, content, self.test_id.clone())
    }
}