Compare commits

..

19 Commits

Author SHA1 Message Date
grimhilt
09f1b26a50 feat(download): simple downloader working without streaming 2024-10-13 22:07:42 +02:00
grimhilt
3853d1fe27 feat(service): include user and base_path in the service 2024-10-13 18:11:11 +02:00
grimhilt
d0f2fafbbb feat(clone): create and save folder + started downloader 2024-10-13 16:26:30 +02:00
grimhilt
487a94f829 feat(enumerator): working enumerator with depth 2024-09-29 22:56:51 +02:00
grimhilt
6989e87a56 feat(enumerator): try to have an aux functions for the task 2024-09-29 22:36:53 +02:00
grimhilt
33d35aba49 feat(clone): created command clone 2024-09-15 21:27:03 +02:00
grimhilt
adac2e6cda feat(enumerator): started enumerator and improved service 2024-09-15 21:26:49 +02:00
grimhilt
d442bf689c feat(clone): read url 2024-09-15 18:16:46 +02:00
grimhilt
3af7a00b0f feat(reqprops): deserialize response 2024-09-15 16:16:38 +02:00
grimhilt
61531f664b feat(nsconfig): basic config and basic integration in service 2024-09-15 14:48:45 +02:00
grimhilt
e780279acd feat(service): started service 2024-09-14 21:33:11 +02:00
grimhilt
cd7b225145 fix(status): get status in obj and fix tests env dir 2024-09-14 13:59:00 +02:00
grimhilt
a69a71d843 feat(push): save objects 2024-09-12 23:22:20 +02:00
grimhilt
8f636b4bf7 feat(status): relative path of obj 2024-09-11 23:36:29 +02:00
grimhilt
e66dc8d408 tests(add): introduce basics tests for add 2024-09-08 17:59:39 +02:00
grimhilt
baeef1a33a feat(reset): introduce reset and fix ROOT_PATH 2024-09-08 17:59:22 +02:00
grimhilt
bc6a23b76b feat(structs): set wrappers to path 2024-09-08 15:56:20 +02:00
grimhilt
1df9c3fba5 test(status): basics tests 2024-09-08 00:18:48 +02:00
grimhilt
250018c4bf feat(indexer+status): make some basics case work 2024-09-08 00:18:33 +02:00
43 changed files with 2232 additions and 178 deletions

View File

@@ -1,3 +1,7 @@
pub mod add; pub mod add;
pub mod init; pub mod init;
pub mod status; pub mod status;
pub mod reset;
pub mod push;
pub mod test;
pub mod clone;

View File

@@ -2,8 +2,11 @@ use crate::config::config::Config;
use crate::store::{ use crate::store::{
ignorer::Ignorer, ignorer::Ignorer,
indexer::Indexer, indexer::Indexer,
nsobject::{self, NsObject}, nsobject::NsObject,
structs::{self, to_obj_path},
}; };
use crate::utils::path::to_repo_relative;
use colored::Colorize;
use std::fs; use std::fs;
// use glob::glob; // use glob::glob;
use std::path::PathBuf; use std::path::PathBuf;
@@ -18,17 +21,15 @@ pub struct AddArgs {
} }
pub fn exec(args: AddArgs, config: Config) { pub fn exec(args: AddArgs, config: Config) {
structs::init(config.get_root_unsafe());
// Init ignorer // Init ignorer
let mut ignorer = Ignorer::new(config.get_root_unsafe()); let mut ignorer = Ignorer::new(config.get_root_unsafe());
ignorer.active_nsignore(!args.force); ignorer.active_nsignore(!args.force);
// Init indexer // Init indexer
let mut indexer = Indexer::new(config.get_root_unsafe()); let mut indexer = Indexer::new(config.get_root_unsafe());
dbg!(indexer.save()); let _ = indexer.load();
dbg!(indexer.load());
return;
nsobject::init(config.get_root_unsafe());
if args.all { if args.all {
return add_dir(config.get_root_unsafe(), &mut ignorer, &mut indexer); return add_dir(config.get_root_unsafe(), &mut ignorer, &mut indexer);
@@ -39,12 +40,18 @@ pub fn exec(args: AddArgs, config: Config) {
path_to_add.push(PathBuf::from(obj_to_add)); path_to_add.push(PathBuf::from(obj_to_add));
if path_to_add.exists() { if path_to_add.exists() {
// ignore object if it is ns config file or nsignored
if ignorer.should_ignore(&path_to_add) {
continue;
}
if path_to_add.is_dir() { if path_to_add.is_dir() {
indexer.index_dir(path_to_add.clone());
add_dir(&path_to_add, &mut ignorer, &mut indexer); add_dir(&path_to_add, &mut ignorer, &mut indexer);
} else { } else {
indexer.index_file(path_to_add); indexer.index_file(path_to_add);
} }
} else if NsObject::from_local_path(&path_to_add).exists() { } else if NsObject::from_local_path(&to_obj_path(&path_to_add)).exists() {
indexer.index_file(path_to_add); indexer.index_file(path_to_add);
} else { } else {
// try globbing // try globbing
@@ -53,16 +60,32 @@ pub fn exec(args: AddArgs, config: Config) {
panic!("fatal: pathspec '{}' did not match any files", obj_to_add); panic!("fatal: pathspec '{}' did not match any files", obj_to_add);
} }
} }
// print all path ignored
if ignorer.ignored_paths.len() > 0 {
println!("The following paths are ignored by one of your .nsignore files:");
for ignored_path in ignorer.ignored_paths {
println!("{}", to_repo_relative(&ignored_path).display());
}
println!(
"{}",
"hint: Use -f if you really want to add them.".bright_red()
);
// hint: Turn this message off by running
// hint: "git config advice.addIgnoredFile false"
}
dbg!(indexer.save());
/* /*
for all files for all files
if globbing if globbing
take all match glob nextsyncignore take all match glob nextsyncignore
else else
if dir if dir
add dir add dir
add all childs with nextsyncignore add all childs with nextsyncignore
else else
add files add files
*/ */
} }

190
src/commands/clone.rs Normal file
View File

@@ -0,0 +1,190 @@
use super::init;
use crate::config::config::Config;
use crate::services::{downloader::Downloader, enumerator::Enumerator, service::Service};
use crate::store::{
nsobject::NsObject,
structs,
};
use regex::Regex;
use std::fs;
use std::io::{self, BufRead};
/// Arguments of the `clone` command.
pub struct CloneArgs {
    /// Remote url to clone from (browser, webdav or `domain/path` form)
    pub remote: String,
    /// Requested enumeration depth — currently unused (see the commented
    /// `set_depth` call in `exec`)
    pub depth: Option<String>,
    /// Force the connection to use http instead of https
    pub force_insecure: bool,
}
/// Clone a remote instance: initialize the local repository, enumerate the
/// remote tree, recreate the folder hierarchy and download every file.
///
/// # Panics
/// Panics when stdin cannot be read or a local directory cannot be created.
pub async fn exec(args: CloneArgs, config: Config) {
    // Set up the local repository (.nextsync directory and store structure)
    init::init(&config);
    structs::init(config.get_root_unsafe());

    let mut url_props = get_url_props(&args.remote);
    if args.force_insecure {
        url_props.is_secure = false;
    }

    // Ask for the webdav user when the url did not contain one
    if url_props.user == String::new() {
        println!("Please enter the username of the webdav instance: ");
        let stdin = io::stdin();
        url_props.user = stdin.lock().lines().next().unwrap().unwrap();
    }

    let service = Service::from(&url_props);

    // Enumerate every file and folder under the remote base path
    let Ok((files, folders)) = Enumerator::new(&service)
        .set_path(String::new()) // use base_path of the service
        // .set_depth(args.depth)
        .get_properties(vec![])
        .enumerate()
        .await
    else {
        todo!("Enumerator has failed")
    };

    // Recreate the remote folder hierarchy locally and save each folder object
    for folder in folders {
        if folder.abs_path() == "" {
            continue;
        }
        // create_dir_all: enumeration order is not guaranteed, so a parent
        // directory may not have been created yet
        fs::create_dir_all(folder.obj_path()).expect("fatal: cannot create directory");
        NsObject::from_local_path(&folder.obj_path())
            .save()
            .unwrap();
    }

    // Download every enumerated file (no binding: download() returns ())
    Downloader::new(&service)
        .set_files(
            files
                .into_iter()
                .map(|file| file.abs_path().to_string())
                .collect(),
        )
        .download()
        .await;
}
/// Connection information extracted from a remote url.
pub struct UrlProps<'a> {
    /// Reach the instance over https (true) or http (false)
    pub is_secure: bool,
    pub domain: &'a str,
    pub path: &'a str,
    pub user: String,
}

impl UrlProps<'_> {
    fn new() -> Self {
        UrlProps {
            is_secure: true,
            domain: "",
            path: "",
            user: String::new(),
        }
    }

    /// Generate the full url of the instance, e.g. `https://user@domain/path`.
    pub fn full_url(&self) -> String {
        // The scheme must come first: the previous format produced
        // `user@https://domain`, which is not a valid url.
        format!(
            "{}{}@{}{}",
            match self.is_secure {
                true => "https://",
                false => "http://",
            },
            self.user,
            self.domain,
            self.path
        )
    }
}
/// Extract connection information (scheme, domain, path) from a url that may
/// be a browser url (`?dir=` argument), a webdav url
/// (`/remote.php/dav/files/<user>/...`) or a direct `domain/path` url.
///
/// # Panics
/// Panics when the url has no domain or no recognizable path.
pub fn get_url_props(url: &str) -> UrlProps {
    let mut url_props = UrlProps::new();

    // Match protocol and domain
    let re = Regex::new(r"((?<protocol>https?)://)?(?<domain>[^/]*)").unwrap();
    let captures = re.captures(url).expect("fatal: invalid url");

    // Assume secure when no protocol is given
    let protocol = captures.name("protocol").map_or("https", |m| m.as_str());
    url_props.is_secure = protocol == "https";

    url_props.domain = captures
        .name("domain")
        .map(|m| m.as_str())
        .expect("fatal: domain not found");

    // Get rest of url
    let end_of_domain_idx = captures
        .name("domain")
        .expect("Already unwraped before")
        .end();
    let rest_of_url = &url[end_of_domain_idx..];

    // Try webdav url: the segment right after the prefix is the user, the
    // remainder is the path
    if let Some(rest_of_url) = rest_of_url.strip_prefix("/remote.php/dav/files") {
        let re = Regex::new(r"[^\/]*(?<path>.*)").unwrap();
        url_props.path = re
            .captures(rest_of_url)
            .expect("fatal: invalid webdav url")
            .name("path")
            .map_or("/", |m| m.as_str());
        return url_props;
    }

    // Try 'dir' argument of a browser url
    let re = Regex::new(r"\?dir=(?<path>[^&]*)").unwrap();
    if let Some(captures) = re.captures(rest_of_url) {
        url_props.path = captures.name("path").map_or("/", |m| m.as_str());
        return url_props;
    }

    // Try path next to domain (starts_with avoids the chars().nth(0) panic
    // on an empty remainder; an empty remainder still ends in the panic below)
    if rest_of_url.starts_with('/') {
        url_props.path = rest_of_url;
        return url_props;
    }

    panic!("fatal: invalid url (cannot find path)");
}
#[cfg(test)]
mod tests {
    use super::*;

    const DOMAIN: &str = "nextcloud.com";
    const SUBDOMAIN: &str = "nextcloud.example.com";

    /// Parse `url_to_test` and check the extracted scheme, domain and path
    /// (the path is always expected to be `/foo/bar`).
    fn compare_url_props(url_to_test: &str, is_secure: bool, domain: &str) {
        let path = "/foo/bar";
        let url_props = get_url_props(url_to_test);
        assert_eq!(url_props.is_secure, is_secure);
        assert_eq!(url_props.domain, domain);
        assert_eq!(url_props.path, path);
    }

    // Urls copied from the browser address bar (`?dir=` query argument)
    #[test]
    fn get_url_props_from_browser_test() {
        compare_url_props(
            "https://nextcloud.com/apps/files/?dir=/foo/bar&fileid=166666",
            true,
            DOMAIN,
        );
        compare_url_props(
            "https://nextcloud.com/apps/files/files/625?dir=/foo/bar",
            true,
            DOMAIN,
        );
    }

    // Direct `domain/path` urls, with and without an explicit scheme
    #[test]
    fn get_url_props_direct_url_test() {
        compare_url_props("https://nextcloud.example.com/foo/bar", true, SUBDOMAIN);
        compare_url_props("http://nextcloud.example.com/foo/bar", false, SUBDOMAIN);
        compare_url_props("nextcloud.example.com/foo/bar", true, SUBDOMAIN);
    }

    // The port must stay part of the domain
    #[test]
    fn get_url_props_with_port_test() {
        compare_url_props("localhost:8080/foo/bar", true, "localhost:8080");
    }
}

View File

@@ -18,6 +18,10 @@ pub struct InitArgs {}
/// ///
/// This function will panic if it cannot create the mentioned files or directories. /// This function will panic if it cannot create the mentioned files or directories.
pub fn exec(_: InitArgs, config: Config) { pub fn exec(_: InitArgs, config: Config) {
init(&config);
}
pub fn init(config: &Config) {
let mut path: PathBuf = config.execution_path.clone(); let mut path: PathBuf = config.execution_path.clone();
path.push(".nextsync"); path.push(".nextsync");

20
src/commands/push.rs Normal file
View File

@@ -0,0 +1,20 @@
use crate::config::config::Config;
use crate::store::object::Obj;
use crate::store::{indexer::Indexer, structs};
pub struct PushArgs {}

/// Push every staged object: load the index, persist each indexed object in
/// the object store, then clear and save the (now empty) index.
pub fn exec(_args: PushArgs, config: Config) {
    structs::init(config.get_root_unsafe());

    let mut indexer = Indexer::new(config.get_root_unsafe());
    let _ = indexer.load();

    // Persist each staged object in the object store
    for indexed_obj in indexer.get_indexed_objs() {
        let local_obj = Obj::from_local_path(&indexed_obj.path);
        local_obj.save().unwrap();
    }

    // Empty the index now that its objects have been saved
    indexer.clear();
    let _ = indexer.save();
}

10
src/commands/reset.rs Normal file
View File

@@ -0,0 +1,10 @@
use crate::config::config::Config;
use crate::store::indexer::Indexer;
pub struct ResetArgs {}

/// Unstage everything by overwriting the saved index with a fresh, empty one.
pub fn exec(_args: ResetArgs, config: Config) {
    // A brand new indexer (nothing loaded) saved as-is wipes staged entries
    let indexer = Indexer::new(config.get_root_unsafe());
    let _ = indexer.save();
}

View File

@@ -1,12 +1,16 @@
use crate::config::config::Config; use crate::config::config::Config;
use crate::store::{ use crate::store::{
indexer::Indexer,
nsobject::NsObject, nsobject::NsObject,
object::{Obj, ObjStatus}, object::{Obj, ObjStatus, ObjType},
structs,
structs::to_obj_path,
}; };
use crate::utils::path; use crate::utils::path;
use colored::{ColoredString, Colorize};
use std::collections::HashMap; use std::collections::HashMap;
use std::fs; use std::fs;
use std::path::{Path, PathBuf}; use std::path::PathBuf;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use threadpool::ThreadPool; use threadpool::ThreadPool;
@@ -14,10 +18,19 @@ pub struct StatusArgs {
pub nostyle: bool, pub nostyle: bool,
} }
type HashMapObj = HashMap<String, Obj>;
type HashMapObjStatuses = Arc<Mutex<HashMapObj>>;
struct ObjStatuses { struct ObjStatuses {
created: Arc<Mutex<HashMap<String, Obj>>>, created: HashMapObjStatuses,
modified: Arc<Mutex<HashMap<String, Obj>>>, modified: HashMapObjStatuses,
deleted: Arc<Mutex<HashMap<String, Obj>>>, deleted: HashMapObjStatuses,
// staged: Arc<Mutex<Vec<Obj>>>,
// not_staged: Arc<Mutex<Vec<Obj>>>,
// untracked: Arc<Mutex<Vec<Obj>>>,
staged: Vec<Obj>,
not_staged: Vec<Obj>,
untracked: Vec<Obj>,
} }
impl ObjStatuses { impl ObjStatuses {
@@ -26,6 +39,12 @@ impl ObjStatuses {
created: Arc::new(Mutex::new(HashMap::new())), created: Arc::new(Mutex::new(HashMap::new())),
modified: Arc::new(Mutex::new(HashMap::new())), modified: Arc::new(Mutex::new(HashMap::new())),
deleted: Arc::new(Mutex::new(HashMap::new())), deleted: Arc::new(Mutex::new(HashMap::new())),
// staged: Arc::new(Mutex::new(Vec::new())),
// not_staged: Arc::new(Mutex::new(Vec::new())),
// untracked: Arc::new(Mutex::new(Vec::new()))
staged: Vec::new(),
not_staged: Vec::new(),
untracked: Vec::new(),
} }
} }
@@ -40,9 +59,62 @@ impl ObjStatuses {
fn push_deleted(&self, key: String, value: Obj) { fn push_deleted(&self, key: String, value: Obj) {
self.deleted.lock().unwrap().insert(key, value); self.deleted.lock().unwrap().insert(key, value);
} }
fn get_created(&self) -> HashMapObj {
self.created.lock().unwrap().clone()
}
}
pub struct ObjStaged {
pub staged: Vec<Obj>,
pub not_staged: Vec<Obj>,
}
fn setup_staged(obj_statuses: Arc<ObjStatuses>, indexer: &Indexer) -> ObjStaged {
let mut staged = Vec::new();
let mut not_staged = Vec::new();
for (_, mut obj) in obj_statuses.get_created() {
obj.set_status(ObjStatus::Created);
if indexer.is_staged(&obj) {
staged.push(obj);
} else {
not_staged.push(obj);
}
}
ObjStaged { staged, not_staged }
// for (_, mut obj) in self.modified.lock().unwrap().iter() {
// obj.set_status(ObjStatus::Modified);
// if indexer.is_staged(&obj) {
// self.staged.lock().unwrap().push(obj);
// } else {
// self.not_staged.lock().unwrap().push(obj);
// }
// }
// for (_, mut obj)in self.deleted.lock().unwrap().iter() {
// obj.set_status(ObjStatus::Deleted);
// if indexer.is_staged(&obj) {
// self.staged.lock().unwrap().push(obj);
// } else {
// self.not_staged.lock().unwrap().push(obj);
// }
// }
} }
pub fn exec(args: StatusArgs, config: Config) { pub fn exec(args: StatusArgs, config: Config) {
let status = get_obj_changes(&config);
if args.nostyle {
todo!("Not style not implemented yet");
}
print_status(&status, config.get_root_unsafe());
}
pub fn get_obj_changes(config: &Config) -> ObjStaged {
structs::init(config.get_root_unsafe());
// use root of repo if no custom path has been set by the command // use root of repo if no custom path has been set by the command
let root = if config.is_custom_execution_path { let root = if config.is_custom_execution_path {
config.execution_path.clone() config.execution_path.clone()
@@ -50,14 +122,21 @@ pub fn exec(args: StatusArgs, config: Config) {
config.get_root_unsafe().to_path_buf() config.get_root_unsafe().to_path_buf()
}; };
let indexer = Arc::new({
let mut indexer = Indexer::new(config.get_root_unsafe());
indexer.load_unsafe();
indexer
});
let pool = ThreadPool::new(4); let pool = ThreadPool::new(4);
let repo_root = config.get_root_unsafe().clone(); let repo_root = config.get_root_unsafe().clone();
let res = Arc::new(ObjStatuses::new()); let res = Arc::new(ObjStatuses::new());
let pool_clone = pool.clone(); let pool_clone = pool.clone();
let res_clone = Arc::clone(&res); let res_clone = Arc::clone(&res);
let indexer_clone = Arc::clone(&indexer);
pool.execute(move || { pool.execute(move || {
compare_dir(pool_clone, &repo_root, &root, res_clone); compare_dir(pool_clone, indexer_clone, &repo_root, &root, res_clone);
}); });
pool.join(); pool.join();
@@ -71,7 +150,61 @@ pub fn exec(args: StatusArgs, config: Config) {
for entry in res.deleted.lock().unwrap().iter() { for entry in res.deleted.lock().unwrap().iter() {
println!("deleted: {}", entry.0); println!("deleted: {}", entry.0);
} }
// find moved and copied // find moved and copied
// find staged
setup_staged(Arc::clone(&res), &indexer)
}
///
/// Pretty-print the staged / not staged objects, mimicking `git status`.
///
/// * `objs`: the objects to display
/// * `root_path`: path of the repo's root, used to print relative paths
fn print_status(objs: &ObjStaged, root_path: &PathBuf) {
    if objs.staged.is_empty() && objs.not_staged.is_empty() {
        println!("Nothing to push, working tree clean");
        return;
    }

    if !objs.staged.is_empty() {
        println!("Changes to be pushed:");
        println!(" (Use \"nextsync reset\" to unstage)");
        // (use "git restore --staged <file>..." to unstage)
        // by alphabetical order
        for obj in objs.staged.iter() {
            print_object(&obj, root_path, |status: &str| status.green());
        }
    }

    // modified
    // deleted
    // renamed
    // new file

    // Only print the section when there is something not staged; the previous
    // code printed an empty section header
    if !objs.not_staged.is_empty() {
        println!("Changes not staged for push:");
        println!(" (Use \"nextsync add <file>...\" to update what will be pushed)");
        for obj in objs.not_staged.iter() {
            print_object(&obj, root_path, |status: &str| status.red());
        }
    }

    // println!("Untracked files:");
    // for obj in objs.untracked.iter() {
    //     println!("{}", obj.cpy_path());
    // }
}
fn print_object(obj: &Obj, root_path: &PathBuf, color: impl Fn(&str) -> ColoredString) {
println!(
" {}{}",
match obj.get_status() {
ObjStatus::Created => match *obj.get_obj_type() {
ObjType::Blob => color("new file: "),
ObjType::Tree => color("new dir: "),
ObjType::Obj => color("new: "),
},
ObjStatus::Modified => color("modified: "),
ObjStatus::Deleted => color("deleted: "),
_ => "unknown".red(),
},
color(&path::to_string(&obj.get_env_relative_path(root_path)))
);
} }
/// ///
@@ -80,7 +213,13 @@ pub fn exec(args: StatusArgs, config: Config) {
/// * `root_path`: path of the repo's root /// * `root_path`: path of the repo's root
/// * `path`: path we should analyze /// * `path`: path we should analyze
/// * `res`: the struct in which we should store the response /// * `res`: the struct in which we should store the response
fn compare_dir(pool: ThreadPool, root_path: &PathBuf, path: &PathBuf, res: Arc<ObjStatuses>) { fn compare_dir(
pool: ThreadPool,
indexer: Arc<Indexer>,
root_path: &PathBuf,
path: &PathBuf,
res: Arc<ObjStatuses>,
) {
let entries = match fs::read_dir(path) { let entries = match fs::read_dir(path) {
Ok(entries) => entries, Ok(entries) => entries,
Err(err) => { Err(err) => {
@@ -102,26 +241,38 @@ fn compare_dir(pool: ThreadPool, root_path: &PathBuf, path: &PathBuf, res: Arc<O
} }
}; };
let repo_relative_entry = path::to_repo_relative(&entry.path(), root_path); if entry.path().ends_with(".nextsync") {
let local_obj = Obj::from_local_path(&repo_relative_entry); continue;
}
if entry.path().is_dir() { let local_obj = Obj::from_local_path(&entry.path().into());
if entry.path().ends_with(".nextsync") {
continue;
}
if entry.path().is_dir() {
if local_obj.is_new() { if local_obj.is_new() {
// TODO: opti move files in new directory // TODO: opti move files in new directory
res.push_created(local_obj.cpy_path(), local_obj); if indexer.is_staged_parent(&local_obj) {
let res_clone = Arc::clone(&res);
add_childs(root_path, &entry.path(), res_clone);
} else {
res.push_created(local_obj.cpy_path(), local_obj);
}
} else { } else {
// can be modified
local_dirs.insert(local_obj.cpy_path(), local_obj); local_dirs.insert(local_obj.cpy_path(), local_obj);
// Analyze sub folder // Analyze sub folder
let pool_clone = pool.clone(); let pool_clone = pool.clone();
let root_path_clone = root_path.clone(); let root_path_clone = root_path.clone();
let res_clone = Arc::clone(&res); let res_clone = Arc::clone(&res);
let indexer_clone = Arc::clone(&indexer);
pool.execute(move || { pool.execute(move || {
compare_dir(pool_clone, &root_path_clone, &entry.path(), res_clone); compare_dir(
pool_clone,
indexer_clone,
&root_path_clone,
&entry.path(),
res_clone,
);
}); });
} }
} else { } else {
@@ -134,7 +285,7 @@ fn compare_dir(pool: ThreadPool, root_path: &PathBuf, path: &PathBuf, res: Arc<O
} }
// Read ns objects to find deleted // Read ns objects to find deleted
let entries = NsObject::from_local_path(&path); let entries = NsObject::from_local_path(&to_obj_path(&path));
for entry in entries.iter() { for entry in entries.iter() {
if entry.is_file() { if entry.is_file() {
match local_files.get(entry.get_obj_path().to_str().unwrap()) { match local_files.get(entry.get_obj_path().to_str().unwrap()) {
@@ -160,3 +311,35 @@ fn compare_dir(pool: ThreadPool, root_path: &PathBuf, path: &PathBuf, res: Arc<O
} }
} }
} }
/// Recursively record every entry under `path` as created in `res`.
///
/// Used when a whole new directory is staged: all of its children are new too.
/// * `root_path`: path of the repo's root (only forwarded to the recursion)
/// * `path`: directory whose children should be recorded
/// * `res`: the struct in which the created objects are stored
fn add_childs(root_path: &PathBuf, path: &PathBuf, res: Arc<ObjStatuses>) {
    let entries = match fs::read_dir(path) {
        Ok(entries) => entries,
        Err(err) => {
            eprintln!("Failed to read {} ({err})", path.display());
            return;
        }
    };

    for entry in entries {
        let entry = match entry {
            Ok(entry) => entry,
            Err(err) => {
                // Skip unreadable entries but keep processing the others
                eprintln!("Failed to read entry {err}");
                continue;
            }
        };

        let local_obj = Obj::from_local_path(&entry.path().into());
        if entry.path().is_dir() {
            // Never descend into the store itself
            if entry.path().ends_with(".nextsync") {
                continue;
            }
            // Record the directory's children first, then the directory
            let res_clone = Arc::clone(&res);
            add_childs(root_path, &entry.path(), res_clone);
            res.push_created(local_obj.cpy_path(), local_obj);
        } else {
            res.push_created(local_obj.cpy_path(), local_obj);
        }
    }
}

29
src/commands/test.rs Normal file
View File

@@ -0,0 +1,29 @@
use crate::config::config::Config;
// use crate::services::req_props::{Props, ReqProps};
// use crate::store::object::Obj;
// use crate::store::{indexer::Indexer, structs};
pub struct TestArgs {}

/// Scratch command kept for manual experiments; currently does nothing.
/// Parameters are underscore-prefixed (consistent with `reset`/`push`) to
/// silence unused warnings while the experiments below stay commented out.
pub fn exec(_args: TestArgs, _config: Config) {
    // let mut settings = CConfig::default();
    // settings.merge(File::with_name("config.toml")).unwrap();
    // dbg!(settings);
    // let config: MainConfig =
    //     toml::from_str(&std::fs::read_to_string("config.toml").unwrap()).unwrap();
    // dbg!(config);
    // let config: ConfigFile = settings.try_into().unwrap();
    // println!("{:?}", config);
    // Ok(())

    // tokio::runtime::Runtime::new().unwrap().block_on(async {
    //     let mut req = ReqProps::new("")
    //         .set_config(&config)
    //         .get_properties(vec![Props::CreationDate, Props::LastModified]);
    //     req.send().await;
    // });
    // dbg!(req);
}

View File

@@ -1 +1,2 @@
pub mod config; pub mod config;
pub mod nsconfig;

View File

@@ -1,3 +1,4 @@
use super::nsconfig::{load_nsconfig, NsConfig};
use std::env; use std::env;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::OnceLock; use std::sync::OnceLock;
@@ -6,10 +7,12 @@ use std::sync::OnceLock;
/// # Parameters /// # Parameters
/// * `execution_path`: path of the command (directory arg) or current path /// * `execution_path`: path of the command (directory arg) or current path
/// * `root`: path of the repo /// * `root`: path of the repo
#[derive(Clone)]
pub struct Config { pub struct Config {
pub execution_path: PathBuf, pub execution_path: PathBuf,
pub is_custom_execution_path: bool, pub is_custom_execution_path: bool,
root: OnceLock<Option<PathBuf>>, root: OnceLock<Option<PathBuf>>,
nsconfig: OnceLock<NsConfig>,
} }
impl Config { impl Config {
@@ -18,6 +21,7 @@ impl Config {
execution_path: PathBuf::from(env::current_dir().unwrap()), execution_path: PathBuf::from(env::current_dir().unwrap()),
is_custom_execution_path: false, is_custom_execution_path: false,
root: OnceLock::new(), root: OnceLock::new(),
nsconfig: OnceLock::new(),
} }
} }
@@ -27,6 +31,7 @@ impl Config {
execution_path: PathBuf::from(path), execution_path: PathBuf::from(path),
is_custom_execution_path: true, is_custom_execution_path: true,
root: OnceLock::new(), root: OnceLock::new(),
nsconfig: OnceLock::new(),
}, },
None => Config::new(), None => Config::new(),
} }
@@ -58,4 +63,9 @@ impl Config {
None => panic!("fatal: not a nextsync repository (or any parent up to mount point /)"), None => panic!("fatal: not a nextsync repository (or any parent up to mount point /)"),
} }
} }
/// Lazily load and cache the repo's `.nextsync/config` file.
/// The file is read at most once per `Config` instance.
pub fn get_nsconfig(&self) -> &NsConfig {
    self.nsconfig
        .get_or_init(|| load_nsconfig(self.get_root_unsafe()))
}
} }

67
src/config/nsconfig.rs Normal file
View File

@@ -0,0 +1,67 @@
use serde::Deserialize;
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
/// `[core]` section of the nsconfig file (no options yet).
/// Default is derived: identical to the hand-written impl for an empty struct.
#[derive(Deserialize, Debug, Clone, Default)]
#[serde(default)]
struct Core {}
/// A `[remote."name"]` section of the nsconfig file.
/// Default is derived: `url` defaults to `None`, `forceinsecure` to `false`,
/// exactly what the hand-written impl produced.
#[derive(Deserialize, Debug, Clone, Default)]
#[serde(default)]
pub struct Remote {
    /// Url of the remote instance
    pub url: Option<String>,
    /// Force connection to nextcloud to be http (not https)
    forceinsecure: bool,
}
#[derive(Deserialize, Debug, Clone)]
#[serde(default)]
pub struct NsConfig {
core: Core,
remote: std::collections::HashMap<String, Remote>,
}
impl NsConfig {
pub fn get_remote(&self, name: &str) -> Remote {
self.remote.get(name).unwrap().clone()
}
}
impl Default for NsConfig {
fn default() -> Self {
NsConfig {
core: Core::default(),
remote: HashMap::new(),
}
}
}
/// Load the nsconfig file from `<repo_root>/.nextsync/config`, falling back
/// to the default configuration when the file does not exist.
///
/// # Panics
/// Panics when the file exists but cannot be read or is not valid toml.
pub fn load_nsconfig(repo_root: &PathBuf) -> NsConfig {
    // join() replaces the clone + push + push chain
    let config_file_path = repo_root.join(".nextsync").join("config");

    if !config_file_path.exists() {
        return NsConfig::default();
    }

    let content =
        fs::read_to_string(&config_file_path).expect("fatal: cannot read the nsconfig file");
    toml::from_str(&content).expect("fatal: invalid nsconfig file")
}

6
src/lib.rs Normal file
View File

@@ -0,0 +1,6 @@
pub mod commands;
pub mod config;
pub mod store;
pub mod subcommands;
pub mod utils;
pub mod services;

View File

@@ -5,8 +5,10 @@ mod config;
mod store; mod store;
mod subcommands; mod subcommands;
mod utils; mod utils;
mod services;
fn main() { #[tokio::main]
async fn main() {
let app = Command::new("Nextsync") let app = Command::new("Nextsync")
.version("1.0") .version("1.0")
.author("grimhilt") .author("grimhilt")
@@ -15,6 +17,10 @@ fn main() {
subcommands::init::create(), subcommands::init::create(),
subcommands::add::create(), subcommands::add::create(),
subcommands::status::create(), subcommands::status::create(),
subcommands::reset::create(),
subcommands::push::create(),
subcommands::test::create(),
subcommands::clone::create(),
]); ]);
// .setting(clap::AppSettings::SubcommandRequiredElseHelp); // .setting(clap::AppSettings::SubcommandRequiredElseHelp);
@@ -24,6 +30,10 @@ fn main() {
Some(("init", args)) => subcommands::init::handler(args), Some(("init", args)) => subcommands::init::handler(args),
Some(("add", args)) => subcommands::add::handler(args), Some(("add", args)) => subcommands::add::handler(args),
Some(("status", args)) => subcommands::status::handler(args), Some(("status", args)) => subcommands::status::handler(args),
Some(("reset", args)) => subcommands::reset::handler(args),
Some(("push", args)) => subcommands::push::handler(args),
Some(("test", args)) => subcommands::test::handler(args),
Some(("clone", args)) => subcommands::clone::handler(args).await,
Some((_, _)) => {} Some((_, _)) => {}
None => {} None => {}
}; };

5
src/services.rs Normal file
View File

@@ -0,0 +1,5 @@
pub mod downloader;
pub mod download;
pub mod enumerator;
pub mod req_props;
pub mod service;

42
src/services/download.rs Normal file
View File

@@ -0,0 +1,42 @@
use crate::services::service::{Request, Service};
use crate::store::nsobject::NsObject;
use std::{fs::OpenOptions, io::Write};
/// A single webdav GET request whose body is written to a local file.
pub struct Download<'a> {
    request: Request<'a>,
    // local destination path, without a leading '/' (see set_obj_path)
    obj_path: String,
}
impl<'a> Download<'a> {
pub fn new(service: &'a Service) -> Self {
Download {
request: Request::new(service),
obj_path: String::new(),
}
}
pub fn set_obj_path(mut self, obj_path: String) -> Self {
self.request.get(obj_path.clone());
self.obj_path = obj_path
.strip_prefix("/")
.unwrap_or(&self.obj_path.clone())
.to_string();
self
}
pub async fn send(&mut self) -> Result<(), reqwest::Error> {
let res = self.request.send().await;
let body = res.unwrap().bytes().await?;
let mut file = OpenOptions::new()
.write(true)
.create(true)
.open(self.obj_path.clone())
.unwrap();
file.write_all(&body.to_vec()).unwrap();
NsObject::from_local_path(&self.obj_path.clone().into())
.save()
.unwrap();
Ok(())
}
}

View File

@@ -0,0 +1,26 @@
use super::{download::Download, service::Service};
/// Download a list of remote files sequentially through a shared service.
pub struct Downloader<'a> {
    service: &'a Service,
    files: Vec<String>,
}

impl<'a> Downloader<'a> {
    pub fn new(service: &'a Service) -> Self {
        Downloader {
            service,
            files: Vec::new(),
        }
    }

    /// Set the list of remote file paths to download.
    pub fn set_files(mut self, files: Vec<String>) -> Self {
        self.files = files;
        self
    }

    /// Download every file one by one, reporting individual failures instead
    /// of silently discarding the `Result` of each request.
    pub async fn download(&self) {
        // Iterate by reference: no need to clone the whole list
        for file in &self.files {
            if let Err(err) = Download::new(self.service)
                .set_obj_path(file.clone())
                .send()
                .await
            {
                eprintln!("Failed to download {file} ({err})");
            }
        }
    }
}

103
src/services/enumerator.rs Normal file
View File

@@ -0,0 +1,103 @@
use super::{
req_props::{Props, ReqProps, Response},
service::Service,
};
use crate::utils::path;
use std::sync::Arc;
use tokio::{sync::Mutex, task::JoinSet};
/// Default webdav `Depth` used when enumerating remote folders.
pub const DEFAULT_DEPTH: u16 = 3;

/// Walk a remote folder tree with PROPFIND requests, `depth` levels at a time.
pub struct Enumerator<'a> {
    service: &'a Service,
    // remote path where the enumeration starts (empty = service base path)
    path: String,
    // number of levels fetched by a single PROPFIND request
    depth: u16,
    // extra properties requested for every enumerated object
    properties: Vec<Props>,
}
impl<'a> Enumerator<'a> {
    pub fn new(service: &'a Service) -> Self {
        Enumerator {
            service,
            path: String::new(),
            depth: DEFAULT_DEPTH,
            properties: Vec::new(),
        }
    }

    /// Set the remote path where the enumeration starts.
    pub fn set_path(mut self, path: String) -> Self {
        self.path = path;
        self
    }

    /// Set how many levels a single PROPFIND request fetches.
    pub fn set_depth(mut self, depth: u16) -> Self {
        self.depth = depth;
        self
    }

    /// Add properties to request for every enumerated object.
    pub fn get_properties(mut self, properties: Vec<Props>) -> Self {
        self.properties.extend(properties);
        self
    }

    /// Enumerate the remote tree, spawning one task per folder that still
    /// needs to be visited.
    ///
    /// Returns the discovered `(files, folders)` on success.
    pub async fn enumerate(&self) -> Result<(Vec<Response>, Vec<Response>), std::io::Error> {
        let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
        let files = Arc::new(Mutex::new(Vec::new()));
        let folders = Arc::new(Mutex::new(Vec::new()));
        let service = Arc::from(self.service.clone());
        // in-flight PROPFIND tasks; the loop stops once the queue is empty
        // and this counter is back to zero
        let tasks_active = Arc::new(Mutex::new(0));

        // Seed the queue with the starting path
        tx.send(self.path.clone())
            .expect("receiver is still alive, send cannot fail");

        // snake_case (was `taskSet`); the JoinHandle returned by spawn was
        // bound to an unused variable before
        let mut task_set = JoinSet::new();
        loop {
            if let Ok(path) = rx.try_recv() {
                let current_depth = path::get_depth(&path);
                *tasks_active.lock().await += 1;

                let tx_clone = tx.clone();
                let files_clone = Arc::clone(&files);
                let folders_clone = Arc::clone(&folders);
                let service_clone = Arc::clone(&service);
                let properties = self.properties.clone();
                let depth = self.depth; // u16 is Copy, no clone() needed
                let tasks_active_clone = Arc::clone(&tasks_active);

                task_set.spawn(async move {
                    let res = ReqProps::new(&service_clone)
                        .set_path(path.clone())
                        .set_depth(depth)
                        .get_properties(properties)
                        .send()
                        .await
                        .unwrap();

                    for obj in res.responses {
                        if obj.is_dir() {
                            // Avoid enumerating the same folder multiple times
                            if obj.abs_path() != &path {
                                // only recurse into folders deeper than what
                                // this request already covered
                                if obj.path_depth() >= current_depth + depth {
                                    tx_clone.send(obj.abs_path().to_owned()).unwrap();
                                }
                                folders_clone.lock().await.push(obj);
                            }
                        } else {
                            files_clone.lock().await.push(obj);
                        }
                    }
                    *tasks_active_clone.lock().await -= 1;
                });
            } else if *tasks_active.lock().await <= 0 {
                break;
            } else {
                // Yield instead of busy-spinning: the previous hot loop never
                // awaited, starving (and deadlocking, on a current-thread
                // runtime) the very tasks it was waiting for.
                tokio::task::yield_now().await;
            }
        }
        task_set.join_all().await;

        Ok((
            Arc::try_unwrap(files).unwrap().into_inner(),
            Arc::try_unwrap(folders).unwrap().into_inner(),
        ))
    }
}

166
src/services/req_props.rs Normal file
View File

@@ -0,0 +1,166 @@
use crate::services::service::{Request, Service};
use crate::store::structs::{to_obj_path, ObjPath};
use crate::utils::path;
use serde::Deserialize;
use serde_xml_rs::from_str;
use std::path::PathBuf;
/// Webdav properties that can be requested through a PROPFIND body
/// (see the `From<&Props> for &str` conversion below for the xml fragments).
///
/// NOTE(review): `RessourceType` and `ContainedFileCountm` look like typos
/// (`ResourceType` / `ContainedFileCount`); renaming public variants would
/// break callers, so they are only flagged here.
#[derive(Clone)]
pub enum Props {
    CreationDate,
    LastModified,
    ETag,
    ContentType,
    RessourceType,
    ContentLength,
    ContentLanguage,
    DisplayName,
    FileId,
    Permissions,
    Size,
    HasPreview,
    Favorite,
    CommentsUnread,
    OwnerDisplayName,
    ShareTypes,
    ContainedFolderCount,
    ContainedFileCountm,
}
/// `<d:propstat>` element of a PROPFIND response.
#[derive(Deserialize, Debug)]
struct Propstat {
    #[serde(rename = "prop")]
    prop: Prop,
}

/// Subset of the `<d:prop>` values we deserialize.
#[derive(Deserialize, Debug)]
struct Prop {
    #[serde(rename = "getlastmodified")]
    last_modified: Option<String>,
    #[serde(rename = "getcontentlength")]
    content_length: Option<u64>,
}

/// One `<d:response>` (a single file or folder) of a PROPFIND answer.
#[derive(Deserialize, Debug)]
pub struct Response {
    #[serde(rename = "href")]
    pub href: String,
    #[serde(rename = "propstat")]
    propstat: Propstat,
    // not parsed from the xml: filled in after parsing (see ReqProps::send)
    href_prefix: Option<String>,
}
impl Response {
    /// Webdav marks folders with a trailing slash in `href`.
    pub fn is_dir(&self) -> bool {
        self.href.ends_with('/')
    }

    /// Path of the object relative to the service base: `href` without the
    /// `/remote.php/dav/files/<prefix>` part and without a trailing slash.
    ///
    /// # Panics
    /// Panics when `href` does not start with the expected webdav prefix.
    pub fn abs_path(&self) -> &str {
        // leftover dbg!(self.href_prefix.clone()) removed
        let prefix = format!(
            "/remote.php/dav/files/{}",
            self.href_prefix.clone().unwrap_or_default()
        );
        // unwrap_or_else: expect(&format!(..)) built the message even on the
        // success path
        let path = self.href.strip_prefix(&prefix).unwrap_or_else(|| {
            panic!(
                "Unexpected result when requesting props. Cannot strip from {}",
                self.href
            )
        });
        // Drop the directory marker so files and folders compare equally
        path.strip_suffix('/').unwrap_or(path)
    }

    /// Path of the object as stored in the object store (no leading '/').
    pub fn obj_path(&self) -> ObjPath {
        let mut path = self.abs_path();
        path = path.strip_prefix("/").unwrap();
        to_obj_path(&PathBuf::from(path))
    }

    /// Depth (number of components) of the object's absolute path.
    pub fn path_depth(&self) -> u16 {
        path::get_depth(self.abs_path())
    }
}
/// Top-level `<d:multistatus>` element of a PROPFIND answer.
#[derive(Deserialize, Debug)]
pub struct Multistatus {
    #[serde(rename = "response")]
    pub responses: Vec<Response>,
}
/// Convert a property into the xml fragment that requests it in a PROPFIND
/// body.
impl From<&Props> for &str {
    fn from(variant: &Props) -> Self {
        match variant {
            Props::CreationDate => "<d:creationdate />",
            Props::LastModified => "<d:getlastmodified />",
            // remaining variants are not requested anywhere yet
            _ => todo!("Props conversion not implemented"),
        }
    }
}
/// Builder for a WebDAV PROPFIND request against a `Service`.
pub struct ReqProps<'a> {
// Underlying HTTP request (method/url/headers).
request: Request<'a>,
// Properties to ask the server for.
properties: Vec<Props>,
}
impl<'a> ReqProps<'a> {
    /// Create a PROPFIND builder bound to `service`.
    pub fn new(service: &'a Service) -> Self {
        ReqProps {
            request: Request::new(service),
            properties: Vec::new(),
        }
    }

    /// Target path of the PROPFIND, relative to the service base.
    pub fn set_path(mut self, path: String) -> Self {
        self.request.propfind(path);
        self
    }

    /// WebDAV `Depth` header: 0 = this resource, 1 = direct children, …
    pub fn set_depth(mut self, depth: u16) -> Self {
        self.request.headers.insert("Depth", depth.into());
        self
    }

    /// Add several properties to request (builder style).
    pub fn get_properties(mut self, properties: Vec<Props>) -> Self {
        self.properties.extend(properties);
        self
    }

    /// Add a single property to request.
    pub fn get_property(&mut self, property: Props) {
        self.properties.push(property);
    }

    /// Build the XML body listing every requested property.
    fn get_body(&self) -> String {
        let mut xml = String::from(
            r#"<?xml version="1.0" encoding="UTF-8"?><d:propfind xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns"><d:prop>"#,
        );
        for property in &self.properties {
            xml.push_str(property.into());
        }
        xml.push_str(r#"</d:prop></d:propfind>"#);
        xml
    }

    /// Send the PROPFIND and parse the multistatus answer.
    ///
    /// Transport errors are now propagated via `?` instead of panicking
    /// on `unwrap` (the signature already returned `Result`).
    ///
    /// NOTE(review): `get_body` is built but never attached to the
    /// request, so the server decides which properties to return —
    /// confirm whether `Request` should carry this body.
    ///
    /// # Panics
    /// Panics when the server's XML cannot be deserialized.
    pub async fn send(&mut self) -> Result<Multistatus, reqwest::Error> {
        let res = self.request.send().await?;
        let xml = res.text().await?;
        let mut multistatus: Multistatus =
            from_str(&xml).expect("Failed to unwrap xml response from req_props");
        // The href prefix (user + base path) is only known by the service,
        // so it is injected after deserialization for abs_path() to use.
        multistatus
            .responses
            .iter_mut()
            .for_each(|res| res.href_prefix = Some(self.request.service.href_prefix()));
        Ok(multistatus)
    }
}
// impl From<Obj> for ReqProps<'_> {
// fn from(obj: Obj) -> Self {
// ReqProps::new(obj.get_obj_path().to_str().unwrap())
// }
// }

151
src/services/service.rs Normal file
View File

@@ -0,0 +1,151 @@
use crate::commands::clone::UrlProps;
use reqwest::{header::HeaderMap, Method};
use reqwest::{Client, ClientBuilder, RequestBuilder};
const USER_AGENT: &str = "Nextsync";
/// Thin wrapper around `reqwest::ClientBuilder`.
pub struct ClientConfig {
// `Option` because `ClientBuilder::build` consumes the builder;
// `None` once the client has been built.
client: Option<ClientBuilder>,
}
impl ClientConfig {
    /// Start a builder preconfigured with the Nextsync user agent.
    pub fn new() -> Self {
        ClientConfig {
            client: Some(Client::builder().user_agent(USER_AGENT)),
        }
    }

    /// Set default headers applied to every request of the built client.
    ///
    /// # Panics
    /// Panics if `build` has already consumed the builder.
    pub fn default_headers(mut self, headers: HeaderMap) -> Self {
        self.client = Some(
            self.client
                .take()
                .expect("Client was already built")
                .default_headers(headers),
        );
        self
    }

    /// Build the underlying `reqwest::Client`.
    ///
    /// # Panics
    /// Panics when called twice (the builder is consumed), or when the
    /// client cannot be initialized (e.g. TLS backend failure) — the bare
    /// `unwrap()` is replaced with an explanatory `expect`.
    pub fn build(&mut self) -> Client {
        self.client
            .take()
            .expect("Cannot build the client twice")
            .build()
            .expect("Failed to initialize HTTP client")
    }
}
/// Connection target for a Nextcloud DAV endpoint.
#[derive(Clone)]
pub struct Service {
/// http[s]://host.xz/remote.php/dav/files
url_base: String,
// Path of the synced directory below the user's DAV root.
base_path: String,
// Nextcloud user name; part of every DAV URL.
user: String,
}
impl From<&UrlProps<'_>> for Service {
    /// Build a service from parsed URL properties, reconstructing the
    /// DAV files endpoint (`http[s]://host/remote.php/dav/files`).
    fn from(url_props: &UrlProps) -> Self {
        let scheme = if url_props.is_secure { "https" } else { "http" };
        let url_base = format!("{}://{}/remote.php/dav/files", scheme, url_props.domain);
        Service {
            url_base,
            base_path: url_props.path.to_string(),
            user: url_props.user.clone(),
        }
    }
}
impl Service {
    /// Attach authentication to an outgoing request.
    ///
    /// FIXME(security): this bearer token is hard-coded in source; it must
    /// be moved to credential storage/config and revoked on the server
    /// before any release.
    fn authenticate(&self, request: RequestBuilder) -> RequestBuilder {
        request
            .bearer_auth("rK5ud2NmrR8p586Th7v272HRgUcZcEKIEluOGjzQQRj7gWMMAISFTiJcFnnmnNiu2VVlENks")
    }

    /// Compose the full request URL: `url_base` + `/user` + `base_path` + `url`.
    fn build_url(&self, url: &str) -> String {
        format!("{}/{}{}{}", self.url_base, self.user, self.base_path, url)
    }

    /// Return the prefix of a href
    /// /user/base_path/path -> /user/base_path
    pub fn href_prefix(&self) -> String {
        format!("{}{}", self.user, self.base_path)
    }
}
/// A single HTTP request in preparation against a `Service`.
pub struct Request<'a> {
pub service: &'a Service,
// Builder for the underlying reqwest client.
client: ClientConfig,
// HTTP method; must be set before `send`.
method: Option<Method>,
// Path relative to the service base; must be set before `send`.
url: Option<String>,
pub headers: HeaderMap,
// Optional request body (e.g. a PROPFIND XML document).
body: Option<String>,
}
impl<'a> Request<'a> {
pub fn new(service: &'a Service) -> Self {
Request {
service,
client: ClientConfig::new(),
method: None,
url: None,
headers: HeaderMap::new(),
body: None,
}
}
pub fn propfind(&mut self, url: String) -> &mut Self {
self.method = Some(Method::from_bytes(b"PROPFIND").expect("Cannot be invalid"));
self.url = Some(url);
self
}
pub fn get(&mut self, url: String) -> &mut Self {
self.method = Some(Method::GET);
self.url = Some(url);
self
}
pub async fn send(&mut self) -> Result<reqwest::Response, reqwest::Error> {
dbg!(self.service.build_url(&self.url.clone().expect("An url must be set")));
self.service
.authenticate(
self.client
.build()
.request(
self.method.clone().expect("Method must be set"),
self.service
.build_url(&self.url.clone().expect("An url must be set")),
)
.headers(self.headers.clone()),
)
.send()
.await
// let mut url = self
// .config
// .clone()
// .expect("A config must be provided to service")
// .get_nsconfig()
// .get_remote("origin")
// .url
// .expect("An url must be set on the remote");
// url.push_str(&self.url.clone().unwrap());
// self.client
// .build()
// .request(self.method.clone().expect("Method must be set"), url)
// .send()
// .await
}
}

View File

@@ -3,3 +3,4 @@ pub mod indexer;
pub mod nsignore; pub mod nsignore;
pub mod nsobject; pub mod nsobject;
pub mod object; pub mod object;
pub mod structs;

View File

@@ -9,6 +9,8 @@ pub struct Ignorer {
use_nsignore: bool, use_nsignore: bool,
/// Nsignore's rules /// Nsignore's rules
rules: OnceLock<Vec<String>>, rules: OnceLock<Vec<String>>,
/// Path that have been ignored by should_ignore function
pub ignored_paths: Vec<PathBuf>,
childs: Option<Vec<Box<Ignorer>>>, childs: Option<Vec<Box<Ignorer>>>,
} }
@@ -18,6 +20,7 @@ impl Ignorer {
path: path.to_path_buf(), path: path.to_path_buf(),
use_nsignore: true, use_nsignore: true,
rules: OnceLock::new(), rules: OnceLock::new(),
ignored_paths: Vec::new(),
childs: None, childs: None,
} }
} }
@@ -52,6 +55,11 @@ impl Ignorer {
/// ///
/// * `path`: /// * `path`:
pub fn should_ignore(&mut self, path: &PathBuf) -> bool { pub fn should_ignore(&mut self, path: &PathBuf) -> bool {
self.is_config_file(path) || (self.use_nsignore && self.is_ignored(path)) let should = self.is_config_file(path) || (self.use_nsignore && self.is_ignored(path));
if should {
self.ignored_paths.push(path.clone());
}
should
} }
} }

View File

@@ -1,38 +1,42 @@
use crate::store::object::ObjType; use crate::store::{
object::{Obj, ObjType},
structs::{ObjPath, to_obj_path},
};
use crate::utils::path::normalize_path;
use std::cmp::Ordering;
use std::fs::File; use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom, Write}; use std::io::{self, Write, BufRead, BufReader};
use std::path::PathBuf; use std::path::PathBuf;
fn get_level(previous_level: u16, path: &PathBuf) -> (u16, IndexLevel) { // Custom sorting function to handle paths hierarchically
let mut level = 0; fn sort_paths_hierarchically(paths: &mut Vec<PathBuf>) {
let mut path = path.clone(); paths.sort_by(|a, b| {
while path.pop() { // Split paths into components for comparison
level += 1; let a_components: Vec<_> = a.components().collect();
} let b_components: Vec<_> = b.components().collect();
let index_level = if previous_level < level { // Compare components one by one
IndexLevel::Down for (a_component, b_component) in a_components.iter().zip(b_components.iter()) {
} else { match a_component.cmp(b_component) {
IndexLevel::Up Ordering::Equal => continue,
}; ordering => return ordering,
}
}
(level, index_level) // If one path is a prefix of the other, the shorter path comes first
a_components.len().cmp(&b_components.len())
});
} }
enum IndexLevel { #[derive(Debug)]
Up, pub struct IndexedObj {
Down, pub obj_type: ObjType,
} pub path: ObjPath,
struct IndexedObj {
obj_type: ObjType,
level: IndexLevel,
path: PathBuf,
} }
pub struct Indexer { pub struct Indexer {
repo_root: PathBuf,
index_file: PathBuf, index_file: PathBuf,
previous_level: u16,
indexed_objs: Vec<IndexedObj>, indexed_objs: Vec<IndexedObj>,
} }
@@ -43,47 +47,83 @@ impl Indexer {
index_file.push("index"); index_file.push("index");
Indexer { Indexer {
repo_root: repo_root.clone(),
index_file, index_file,
previous_level: 0,
indexed_objs: Vec::new(), indexed_objs: Vec::new(),
} }
} }
pub fn load(&self) -> io::Result<()> { pub fn load(&mut self) -> io::Result<()> {
let mut file = File::open(&self.index_file)?; let file = File::open(&self.index_file)?;
let mut str = String::new(); let reader = BufReader::new(file);
// Skip reserved bytes for line in reader.lines() {
let mut byte = [0; 1]; let line = line?;
file.read_exact(&mut byte)?; let line = line.as_bytes();
file.seek(SeekFrom::Start(1))?;
// Read usize value let path_str = String::from_utf8(line[1..].to_vec()).unwrap();
let mut usize_bytes = [0; std::mem::size_of::<usize>()]; let path = PathBuf::from(path_str);
file.read_exact(&mut usize_bytes)?;
dbg!(ObjType::try_from(byte[0]));
let usize_value = usize::from_le_bytes(usize_bytes);
// Read PathBuf as string self.indexed_objs.push(IndexedObj {
let mut buffer = Vec::new(); obj_type: ObjType::try_from(line[0]).unwrap(),
file.read_to_end(&mut buffer)?; path: to_obj_path(&path),
let path_str = String::from_utf8(buffer).unwrap(); });
let path = PathBuf::from(path_str); }
println!("usize value: {}", usize_value);
println!("Path: {:?}", path);
Ok(()) Ok(())
} }
fn index_obj(&mut self, path: PathBuf, obj_type: ObjType) { pub fn load_unsafe(&mut self) {
let (level, index_level) = get_level(self.previous_level, &path); let _ = self.load();
self.previous_level = level; dbg!(&self.indexed_objs);
}
self.indexed_objs.push(IndexedObj { pub fn clear(&mut self) {
self.indexed_objs.clear();
}
pub fn get_indexed_objs(&self) -> &Vec<IndexedObj> {
&self.indexed_objs
}
fn is_indexed(&self, obj: &IndexedObj) -> bool {
self.indexed_objs
.iter()
.position(|o| o.obj_type == obj.obj_type && o.path == obj.path)
.is_some()
}
pub fn is_staged(&self, obj: &Obj) -> bool {
// self.indexed_objs.iter().position(|o| &o.obj_type == obj.get_obj_type() && &o.path == obj.get_obj_path()).is_some()
self.indexed_objs
.iter()
.position(|o| &o.path == obj.get_obj_path())
.is_some()
}
pub fn is_staged_parent(&self, obj: &Obj) -> bool {
self.indexed_objs
.iter()
.position(|o| o.path.starts_with(obj.get_obj_path()))
.is_some()
}
fn index_obj(&mut self, path: PathBuf, obj_type: ObjType) {
let mut path = path;
path = normalize_path(path); // normalize path (/foo/./bar => /foo/bar)
// change path to be relative to repo's root
path = path.strip_prefix(&self.repo_root).unwrap().to_path_buf();
let indexed_obj = IndexedObj {
obj_type, obj_type,
level: index_level, path: to_obj_path(&path),
path: path.clone(), };
});
if self.is_indexed(&indexed_obj) {
return;
}
self.indexed_objs.push(indexed_obj);
} }
pub fn index_file(&mut self, path: PathBuf) { pub fn index_file(&mut self, path: PathBuf) {
@@ -97,20 +137,16 @@ impl Indexer {
pub fn save(&self) -> io::Result<()> { pub fn save(&self) -> io::Result<()> {
let mut file = File::create(&self.index_file)?; let mut file = File::create(&self.index_file)?;
// Write reserved bytes for obj in self.indexed_objs.iter() {
let variant = ObjType::Blob; // write obj_type
let byte: u8 = variant.into(); file.write(&[obj.obj_type.clone().into()])?;
file.write_all(&[byte])?;
// Write usize value // write path
let usize_value: usize = 12; file.write(obj.path.to_str().unwrap().as_bytes())?;
let usize_bytes = usize_value.to_le_bytes();
file.write_all(&usize_bytes)?;
// Write PathBuf as string file.write(b"\n")?;
let path = PathBuf::from("/jodi/"); file.flush()?;
let path_str = path.to_str().unwrap(); }
file.write_all(path_str.as_bytes())?;
Ok(()) Ok(())
} }

View File

@@ -1,66 +1,30 @@
use crate::store::object::{Obj, ObjMetadata, ObjType}; use crate::store::{
use crypto::digest::Digest; object::{Obj, ObjMetadata, ObjType},
use crypto::sha1::Sha1; structs::{NsObjPath, ObjPath},
use std::path::PathBuf; };
use std::fs::{self, File, OpenOptions};
use std::io::{self, Write};
use std::sync::OnceLock; use std::sync::OnceLock;
use std::time::UNIX_EPOCH;
pub static REPO_ROOT: OnceLock<PathBuf> = OnceLock::new();
pub fn init(repo_root: &PathBuf) {
REPO_ROOT.set(repo_root.clone());
}
type NsObjectChilds = Vec<Box<NsObject>>; type NsObjectChilds = Vec<Box<NsObject>>;
struct NsObjectPath {
path: PathBuf,
}
impl From<&str> for NsObjectPath {
fn from(hash: &str) -> Self {
let (dir, res) = hash.split_at(2);
let mut ns_obj_path = match REPO_ROOT.get() {
Some(path) => path.clone(),
None => {
panic!("fatal: 'REPO_ROOT' not set, you must initialize nsobject before using it!")
}
};
ns_obj_path.push(dir);
ns_obj_path.push(res);
NsObjectPath { path: ns_obj_path }
}
}
impl From<&PathBuf> for NsObjectPath {
fn from(obj_path: &PathBuf) -> Self {
let mut hasher = Sha1::new();
hasher.input_str(
obj_path
.to_str()
.expect("Cannot contains non UTF-8 char in path"),
);
NsObjectPath::from(hasher.result_str().as_str())
}
}
pub struct NsObject { pub struct NsObject {
pub obj_type: ObjType, pub obj_type: ObjType,
obj_path: OnceLock<PathBuf>, /// path of the obj in the repo
nsobj_path: OnceLock<PathBuf>, obj_path: OnceLock<ObjPath>,
/// path of the nsobj file in the store
nsobj_path: OnceLock<NsObjPath>,
childs: OnceLock<NsObjectChilds>, childs: OnceLock<NsObjectChilds>,
index: usize,
} }
impl NsObject { impl NsObject {
pub fn from_local_path(path: &PathBuf) -> Self { pub fn from_local_path(path: &ObjPath) -> Self {
NsObject { NsObject {
obj_type: ObjType::Obj, obj_type: ObjType::Obj,
obj_path: OnceLock::from(path.to_path_buf()), obj_path: OnceLock::from(path.clone()),
nsobj_path: OnceLock::new(), nsobj_path: OnceLock::new(),
childs: OnceLock::new(), childs: OnceLock::new(),
index: 0,
} }
} }
@@ -68,19 +32,18 @@ impl NsObject {
NsObject { NsObject {
obj_type: ObjType::Obj, obj_type: ObjType::Obj,
obj_path: OnceLock::new(), obj_path: OnceLock::new(),
nsobj_path: OnceLock::from(NsObjectPath::from(hash).path), nsobj_path: OnceLock::from(NsObjPath::from(hash)),
childs: OnceLock::new(), childs: OnceLock::new(),
index: 0,
} }
} }
pub fn get_obj_path(&self) -> &PathBuf { pub fn get_obj_path(&self) -> &ObjPath {
self.obj_path.get_or_init(|| todo!()) self.obj_path.get_or_init(|| todo!())
} }
fn get_nsobj_path(&self) -> &PathBuf { fn get_nsobj_path(&self) -> &NsObjPath {
self.nsobj_path self.nsobj_path
.get_or_init(|| NsObjectPath::from(self.get_obj_path()).path) .get_or_init(|| NsObjPath::from(self.get_obj_path()))
} }
/// Return the existence of the nsobj in the store /// Return the existence of the nsobj in the store
@@ -121,15 +84,99 @@ impl NsObject {
/// * if it is a Tree obj after an empty line there will be the definition /// * if it is a Tree obj after an empty line there will be the definition
/// of its subobjs (one line by subobj) * /// of its subobjs (one line by subobj) *
/// obj_type + hash /// obj_type + hash
pub fn save(&self) -> Result<(), ()> { pub fn save(&self) -> io::Result<()> {
if !self.get_obj_path().exists() { if !self.get_obj_path().exists() {
// delete current obj self.delete_nsobj();
// delete reference on parent
} else { } else {
dbg!(self.get_nsobj_path());
if self.get_nsobj_path().exists() {
self.edit_nsobj();
} else {
self.create_nsobj()?;
}
} }
Ok(()) Ok(())
} }
fn delete_nsobj(&self) {
todo!("Delete nsobj");
// delete current obj
// delete reference on parent
}
fn create_nsobj(&self) -> io::Result<()> {
let nsobj_path = self.get_nsobj_path();
let mut file = {
let mut nsobj_dir = nsobj_path.clone();
nsobj_dir.pop();
if !nsobj_dir.exists() {
std::fs::create_dir_all(nsobj_dir)?;
}
File::create(&nsobj_path)?
};
// Write type
file.write(&[self.obj_type.clone().into()])?;
if self.obj_type == ObjType::Blob {
if let Some(metadata) = self.get_metadata() {
// Write size
file.write(&metadata.size.to_le_bytes())?;
// Write modified
file.write(
&metadata
.modified
.expect(&format!(
"Expect 'modified' in metadata of {} to save obj",
self.get_obj_path().as_path().display()
))
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs()
.to_le_bytes(),
)?;
} else {
todo!("Cannot load metadata")
}
}
file.write_all(b"\n")?;
// Write Path
file.write_all(self.get_obj_path().to_str().unwrap().as_bytes())?;
file.write_all(b"\n")?;
file.flush()?;
// Save itself inside its parent
let mut parent_path = self.get_obj_path().clone();
parent_path.pop();
let parent_obj = NsObject::from_local_path(&parent_path);
parent_obj.add_child(&self)?;
Ok(())
}
fn edit_nsobj(&self) {
todo!("Edit nsobj");
}
fn add_child(&self, child: &NsObject) -> io::Result<()> {
let mut file = OpenOptions::new()
.write(true)
.create(true)
.append(true)
.open(self.get_nsobj_path())?;
let child_type = &[child.obj_type.clone().into()];
let child_path = child.get_obj_path().to_str().unwrap().as_bytes();
file.write(child_type)?;
file.write(child_path)?;
file.write(b"\n")?;
file.flush()?;
Ok(())
}
pub fn get_metadata(&self) -> Option<ObjMetadata> { pub fn get_metadata(&self) -> Option<ObjMetadata> {
todo!() todo!()
} }

View File

@@ -1,15 +1,20 @@
use crate::store::nsobject::NsObject; use crate::store::{
nsobject::NsObject,
structs::{NsObjPath, ObjPath},
};
use crate::utils::path; use crate::utils::path;
use std::fs; use std::{
use std::path::PathBuf; env, fmt, fs, io,
use std::sync::OnceLock; path::PathBuf,
use std::time::SystemTime; sync::OnceLock,
time::SystemTime,
};
const MAX_SIZE_TO_USE_HASH: u64 = 12 * 1024 * 1024; const MAX_SIZE_TO_USE_HASH: u64 = 12 * 1024 * 1024;
pub struct ObjMetadata { pub struct ObjMetadata {
size: u64, pub size: u64,
modified: Option<SystemTime>, pub modified: Option<SystemTime>,
} }
#[derive(PartialEq, Clone, Debug)] #[derive(PartialEq, Clone, Debug)]
@@ -20,13 +25,18 @@ pub enum ObjType {
Tree, Tree,
} }
impl fmt::Display for ObjType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl From<ObjType> for u8 { impl From<ObjType> for u8 {
fn from(variant: ObjType) -> Self { fn from(variant: ObjType) -> Self {
variant as u8 variant as u8
} }
} }
impl TryFrom<u8> for ObjType { impl TryFrom<u8> for ObjType {
type Error = String; type Error = String;
@@ -40,42 +50,65 @@ impl TryFrom<u8> for ObjType {
} }
} }
#[derive(PartialEq, Clone)] #[derive(PartialEq, Clone, Debug)]
pub enum ObjStatus { pub enum ObjStatus {
Undefined, Undefined,
Unchanged,
Created, Created,
Modified,
Moved, Moved,
Copied, Copied,
Deleted, Deleted,
} }
#[derive(Clone)] #[derive(Clone, Debug)]
pub struct Obj { pub struct Obj {
obj_type: ObjType, obj_type: ObjType,
status: OnceLock<ObjStatus>, status: OnceLock<ObjStatus>,
/// path of the object from root /// path of the object from root
obj_path: PathBuf, obj_path: ObjPath,
} }
impl Obj { impl Obj {
pub fn from_local_path(path: &PathBuf) -> Self { pub fn from_local_path(path: &ObjPath) -> Self {
// todo set state let obj_path = path.clone();
Obj { Obj {
obj_type: ObjType::Obj, obj_type: {
if path.abs().exists() {
if path.abs().is_dir() {
ObjType::Tree
} else {
ObjType::Blob
}
} else {
ObjType::Obj
}
},
status: OnceLock::new(), status: OnceLock::new(),
obj_path: path.to_path_buf(), obj_path,
} }
} }
fn get_status(&self) -> &ObjStatus { pub fn get_status(&self) -> &ObjStatus {
self.status.get_or_init(|| { self.status.get_or_init(|| {
// read path let nsobj = self.get_nsobj();
ObjStatus::Created if !nsobj.exists() {
return ObjStatus::Created;
}
if !self.obj_path.exists() {
return ObjStatus::Deleted;
}
if self.obj_type != ObjType::Tree && nsobj.obj_type != ObjType::Tree {
if *self != nsobj {
return ObjStatus::Modified;
}
}
ObjStatus::Unchanged
}) })
} }
pub fn set_status(&mut self, status: ObjStatus) { pub fn set_status(&mut self, status: ObjStatus) {
todo!() self.status = OnceLock::from(status);
} }
pub fn set_type(&mut self, obj_type: ObjType) { pub fn set_type(&mut self, obj_type: ObjType) {
@@ -86,12 +119,65 @@ impl Obj {
self.get_status() == &ObjStatus::Created self.get_status() == &ObjStatus::Created
} }
pub fn get_obj_type(&self) -> &ObjType {
&self.obj_type
}
pub fn get_obj_path(&self) -> &ObjPath {
&self.obj_path
}
/// Return the path of the current object relatively to the path the
/// command was executed from.
///
/// * `repo_root`: the absolute repo's root path
pub fn get_env_relative_path(&self, repo_root: &PathBuf) -> PathBuf {
let binding = env::current_dir().unwrap();
if let Ok(root_diff) = binding.strip_prefix(repo_root) {
match self.obj_path.strip_prefix(&root_diff) {
Ok(path) => {
if path == PathBuf::from("") {
PathBuf::from("./")
} else {
path.to_path_buf()
}
}
Err(_) => {
// if cannot strip prefix then we need to go up to the root
let mut res_path = PathBuf::new();
for _ in 0..path::get_level(&root_diff.to_path_buf()) {
res_path.push("..");
}
res_path.push(&*self.obj_path);
res_path
}
}
} else {
// if cannot strip prefix then we are at the repo's root
self.obj_path.to_path_buf()
}
}
pub fn cpy_path(&self) -> String { pub fn cpy_path(&self) -> String {
path::to_string(&self.obj_path) path::to_string(&self.obj_path)
} }
fn get_nsobj(&self) -> NsObject {
NsObject::from_local_path(&self.obj_path)
}
fn get_nsobj_path(&self) -> NsObjPath {
NsObjPath::from(&self.obj_path)
}
pub fn save(&self) -> io::Result<()> {
self.get_nsobj().save()?;
Ok(())
}
pub fn get_metadata(&self) -> Option<ObjMetadata> { pub fn get_metadata(&self) -> Option<ObjMetadata> {
let metadata = match fs::metadata(&self.obj_path) { let metadata = match fs::metadata(&*self.obj_path) {
Ok(m) => m, Ok(m) => m,
Err(err) => { Err(err) => {
eprintln!( eprintln!(
@@ -113,7 +199,13 @@ impl Obj {
impl PartialEq<NsObject> for Obj { impl PartialEq<NsObject> for Obj {
fn eq(&self, other: &NsObject) -> bool { fn eq(&self, other: &NsObject) -> bool {
if self.obj_type != other.obj_type { if self.obj_type != other.obj_type {
eprintln!("Trying to compare different obj type"); eprintln!(
"{}",
format!(
"Trying to compare different obj type ({} != {})",
self.obj_type, other.obj_type
)
);
return false; return false;
} }

170
src/store/structs.rs Normal file
View File

@@ -0,0 +1,170 @@
use crate::utils::{path, tests};
use crypto::{digest::Digest, sha1::Sha1};
use std::env;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::sync::OnceLock;
static REPO_ROOT: OnceLock<PathBuf> = OnceLock::new();

/// Record the repository root once. Under tests the root is also exported
/// through the REPO_ROOT_DEV env var so every process of the harness
/// agrees on it.
pub fn init(repo_root: &PathBuf) {
    if tests::is_var_setup() {
        env::set_var("REPO_ROOT_DEV", path::to_string(repo_root));
    }
    // OnceLock::set is already a no-op (returns Err) when initialized,
    // so no explicit `get().is_none()` guard is needed.
    let _ = REPO_ROOT.set(repo_root.clone());
}
/// Absolute path of the repository root.
///
/// Under tests the value comes from the REPO_ROOT_DEV env var (set by
/// `init`) so child processes share the same root.
///
/// # Panics
/// Panics when `init` has not been called first.
pub fn get_repo_root() -> PathBuf {
    if tests::is_var_setup() {
        return env::var("REPO_ROOT_DEV")
            .expect("REPO_ROOT_DEV is not set")
            .into();
    }
    match REPO_ROOT.get() {
        Some(path) => path.clone(),
        None => {
            // The old message blamed `nsobject`; REPO_ROOT now lives here.
            panic!("fatal: 'REPO_ROOT' not set, you must call structs::init before using it!")
        }
    }
}
/// Repo-relative path of an object, with a lazily computed absolute form.
#[derive(Debug, Clone)]
pub struct ObjPath {
// Path relative to the repository root.
path: PathBuf,
// Cached absolute path; filled on first call to `abs()`.
abs: OnceLock<PathBuf>,
}
impl ObjPath {
/// Absolute path (repo root + relative path), computed once and cached.
pub fn abs(&self) -> &PathBuf {
self.abs.get_or_init(|| {
let mut path = get_repo_root();
path.push(self.path.clone());
path
})
}
}
impl PartialEq for ObjPath {
/// Compare by absolute path when both caches are populated, otherwise
/// by the stored (relative) path.
///
/// NOTE(review): when only ONE side has its absolute cache populated the
/// raw `path` fields are compared; this assumes both are repo-relative —
/// confirm `ObjPath`s built from `String` always hold relative paths.
fn eq(&self, other: &Self) -> bool {
if self.abs.get().is_some() && other.abs.get().is_some() {
self.abs() == other.abs()
} else {
self.path == other.path
}
}
}
// Make ObjPath usable wherever a PathBuf/Path is expected: deref and
// AsRef all expose the stored repo-relative path (not the absolute one).
impl Deref for ObjPath {
type Target = PathBuf;
fn deref(&self) -> &PathBuf {
&self.path
}
}
impl DerefMut for ObjPath {
fn deref_mut(&mut self) -> &mut PathBuf {
&mut self.path
}
}
impl AsRef<Path> for ObjPath {
fn as_ref(&self) -> &Path {
&self.path
}
}
/// Wrap an already repo-relative path into an `ObjPath`
/// (absolute form left uncomputed).
pub fn to_obj_path(path: &PathBuf) -> ObjPath {
    ObjPath {
        path: path.to_path_buf(),
        abs: OnceLock::new(),
    }
}
// `From` (rather than hand-written `Into`) is the idiomatic direction:
// the standard blanket impl still gives callers `Into<ObjPath>` for free,
// so `.into()` call sites keep working unchanged.

impl From<&PathBuf> for ObjPath {
    /// Convert an absolute path into an `ObjPath`, caching the absolute form.
    fn from(path: &PathBuf) -> Self {
        ObjPath {
            path: path::to_repo_relative(path),
            abs: OnceLock::from(path.clone()),
        }
    }
}

impl From<PathBuf> for ObjPath {
    /// Convert an absolute path into an `ObjPath`, caching the absolute form
    /// (moves the buffer instead of cloning it).
    fn from(path: PathBuf) -> Self {
        ObjPath {
            path: path::to_repo_relative(&path),
            abs: OnceLock::from(path),
        }
    }
}

impl From<String> for ObjPath {
    /// Treat the string as an already repo-relative path.
    fn from(path: String) -> Self {
        ObjPath {
            path: path.into(),
            abs: OnceLock::new(),
        }
    }
}
/// Absolute path of an object's record inside the `.nextsync` store.
#[derive(Debug, PartialEq, Clone)]
pub struct NsObjPath {
path: PathBuf,
}
// Expose the inner path wherever a PathBuf/Path is expected.
impl Deref for NsObjPath {
type Target = PathBuf;
fn deref(&self) -> &PathBuf {
&self.path
}
}
impl DerefMut for NsObjPath {
fn deref_mut(&mut self) -> &mut PathBuf {
&mut self.path
}
}
impl AsRef<Path> for NsObjPath {
fn as_ref(&self) -> &Path {
&self.path
}
}
impl From<&str> for NsObjPath {
    /// Locate the store file for `hash`:
    /// `<repo>/.nextsync/objects/<first two chars>/<rest>`.
    fn from(hash: &str) -> Self {
        let (dir, rest) = hash.split_at(2);
        let path = get_repo_root()
            .join(".nextsync")
            .join("objects")
            .join(dir)
            .join(rest);
        NsObjPath { path }
    }
}
impl From<&ObjPath> for NsObjPath {
    /// Map an object path to its store location: the repo root is the
    /// special `.nextsync/HEAD` file, every other object lives under a
    /// SHA-1 of its repo-relative path.
    fn from(obj_path: &ObjPath) -> Self {
        // NsObjPath of root is the HEAD file
        if path::get_level(obj_path) == 0 {
            let mut path = get_repo_root();
            path.push(".nextsync");
            path.push("HEAD");
            return NsObjPath { path };
        }
        let mut hasher = Sha1::new();
        hasher.input_str(
            obj_path
                .to_str()
                // Fixed ungrammatical panic message.
                .expect("Path cannot contain non UTF-8 characters"),
        );
        NsObjPath::from(hasher.result_str().as_str())
    }
}

View File

@@ -1,3 +1,7 @@
pub mod add; pub mod add;
pub mod init; pub mod init;
pub mod reset;
pub mod status; pub mod status;
pub mod push;
pub mod test;
pub mod clone;

58
src/subcommands/clone.rs Normal file
View File

@@ -0,0 +1,58 @@
use clap::{Arg, ArgAction, ArgMatches, Command};
use crate::commands;
use crate::config::config::Config;
/// Build the `clone` subcommand definition (arguments and help text).
pub fn create() -> Command {
Command::new("clone")
// Required positional: the repository URL (several syntaxes accepted,
// see the NEXTSYNC URLS section in after_help).
.arg(
Arg::new("remote")
.required(true)
.num_args(1)
.value_name("REMOTE")
.help("The repository to clone from. See the NEXTSYNC URLS section below for more information on specifying repositories.")
)
// Optional tuning knob for the recursive PROPFIND enumeration.
.arg(
Arg::new("depth")
.short('d')
.long("depth")
.required(false)
.num_args(1)
.help(format!("Depth of the recursive fetch of object properties. This value should be lower when there are a lot of files per directory and higher when there are a lot of subdirectories with fewer files. (Default: {})", crate::services::enumerator::DEFAULT_DEPTH))
)
// Boolean flag (ArgAction::SetTrue): plain http instead of https.
.arg(
Arg::new("force_insecure")
.short('f')
.long("force-insecure")
.action(ArgAction::SetTrue)
.help("Force the connection to nextcloud to be in http (not https)")
)
// Optional positional: directory to clone into.
.arg(
Arg::new("directory")
.required(false)
.num_args(1)
.value_name("DIRECTORY")
)
.about("Clone a repository into a new directory")
.after_help("
NEXTSYNC URLS
The following syntaxes may be used:
- [http[s]://]host.xz/apps/files/?dir=/path/to/repo&fileid=111111
- [http[s]://]host.xz/path/to/repo
- [http[s]://]host.xz/remote.php/dav/files/user/path/to/repo
")
}
/// Entry point for `nextsync clone`: extract CLI arguments and run the command.
pub async fn handler(args: &ArgMatches) {
    // "remote" is declared required, so this branch is always taken.
    if let Some(remote) = args.get_one::<String>("remote") {
        commands::clone::exec(
            commands::clone::CloneArgs {
                remote: remote.to_string(),
                depth: args.get_one::<String>("depth").cloned(),
                // `get_flag` is the idiomatic accessor for ArgAction::SetTrue
                // flags; replaces `*get_one::<bool>(..).unwrap()`.
                force_insecure: args.get_flag("force_insecure"),
            },
            Config::from(args.get_one::<String>("directory")),
        )
        .await;
    }
}

13
src/subcommands/push.rs Normal file
View File

@@ -0,0 +1,13 @@
use clap::{ArgMatches, Command};
use crate::commands;
use crate::commands::push::PushArgs;
use crate::config::config::Config;
/// Build the `push` subcommand definition.
pub fn create() -> Command {
Command::new("push").about("Push changes on nextcloud")
}
/// Entry point for `nextsync push`; takes no CLI arguments yet.
pub fn handler(_args: &ArgMatches) {
commands::push::exec(PushArgs {}, Config::new());
}

17
src/subcommands/reset.rs Normal file
View File

@@ -0,0 +1,17 @@
use clap::{ArgMatches, Command};
use crate::commands;
use crate::commands::reset::ResetArgs;
use crate::config::config::Config;
/// Build the `reset` subcommand definition.
pub fn create() -> Command {
    Command::new("reset").about("Clear the index")
}

/// Entry point for `nextsync reset`; takes no CLI arguments yet.
pub fn handler(_args: &ArgMatches) {
    commands::reset::exec(ResetArgs {}, Config::new());
}

13
src/subcommands/test.rs Normal file
View File

@@ -0,0 +1,13 @@
use clap::{Arg, ArgAction, ArgMatches, Command};
use crate::commands;
use crate::commands::test::TestArgs;
use crate::config::config::Config;
/// Build the `test` subcommand definition (development helper).
pub fn create() -> Command {
    Command::new("test").about("Test command")
}

/// Entry point for `nextsync test`.
/// The parameter is renamed `_args` for consistency with the other
/// subcommand handlers and to silence the unused-variable warning.
pub fn handler(_args: &ArgMatches) {
    commands::test::exec(TestArgs {}, Config::new());
}

View File

@@ -1 +1,2 @@
pub mod path; pub mod path;
pub mod tests;

View File

@@ -1,9 +1,61 @@
use std::path::{Path, PathBuf}; use std::path::{Component, Path, PathBuf};
use crate::store::structs;
pub fn to_repo_relative(path: &PathBuf, root: &PathBuf) -> PathBuf { pub fn to_repo_relative(path: &PathBuf) -> PathBuf {
path.strip_prefix(root).unwrap().to_path_buf() let root = structs::get_repo_root();
path.strip_prefix(&root)
.expect(&format!(
"Expect '{}' to be in the repo '{}'",
path.display(),
root.display()
))
.to_path_buf()
} }
pub fn to_string(path: &PathBuf) -> String { pub fn to_string(path: &PathBuf) -> String {
path.to_str().unwrap().to_string() path.to_str().unwrap().to_string()
} }
/// Number of `pop()` steps from `path` down to an empty path — i.e. its
/// component count, except that the root of an absolute path is not
/// counted (`"/a/b"` -> 2, `"a/b"` -> 2, `""` -> 0).
///
/// The parameter is now the more general `&Path` (`&PathBuf` and
/// `&ObjPath` arguments keep working through deref coercion).
pub fn get_level(path: &Path) -> i32 {
    let mut remaining = path.to_path_buf();
    let mut level = 0;
    while remaining.pop() {
        level += 1;
    }
    level
}
/// Normalize a path lexically: resolve `..` tokens and drop `.` ones
/// without touching the filesystem.
/// Adapted from https://stackoverflow.com/questions/68231306/stdfscanonicalize-for-files-that-dont-exist
///
/// This assumes that `a/b/../c` is `a/c` which might be different from
/// what the OS would have chosen when b is a link. This is OK
/// for broot verb arguments but can't be generally used elsewhere
///
/// A trailing '/' on the input is preserved on the output.
pub fn normalize_path<P: AsRef<Path>>(path: P) -> PathBuf {
    let raw = path.as_ref();
    let keep_trailing_slash = raw.to_str().map_or(false, |s| s.ends_with('/'));
    let mut result = PathBuf::new();
    for part in raw.components() {
        match part {
            // A parent-dir token cancels the previous component when there
            // is one to cancel (`pop` succeeded) …
            Component::ParentDir if result.pop() => {}
            // … otherwise it is kept, like any other component.
            other => result.push(other),
        }
    }
    if keep_trailing_slash {
        // Re-append an empty component so the rendered path ends with '/'.
        result.push("");
    }
    result
}
/// Depth of a slash-separated path string: the number of segments
/// (an empty string counts as one segment).
pub fn get_depth(path: &str) -> u16 {
    path.split('/').count() as u16
}

5
src/utils/tests.rs Normal file
View File

@@ -0,0 +1,5 @@
use std::env;
/// True when running under the test harness (RUNNING_TESTS env var set
/// to any unicode value).
pub fn is_var_setup() -> bool {
    matches!(env::var("RUNNING_TESTS"), Ok(_))
}

49
tests/add_test.rs Normal file
View File

@@ -0,0 +1,49 @@
mod common;
use common::client::ClientTest;
use nextsync::store::{structs::to_obj_path, indexer::Indexer};
use nextsync::commands::status::{get_obj_changes, StatusArgs};
use nextsync::config::config::Config;
use std::io;
use std::path::PathBuf;
const DEFAULT_STATUS_ARG: StatusArgs = StatusArgs { nostyle: false };

/// Assert that the index holds exactly the `expected` paths, in any order.
fn indexed_expected(indexer: &Indexer, expected: Vec<&str>) {
    let objs = indexer.get_indexed_objs();
    assert_eq!(objs.len(), expected.len());
    for obj in expected {
        assert!(objs
            .iter()
            .any(|e| e.path == to_obj_path(&PathBuf::from(obj))));
    }
}
#[test]
fn add_ignored_file() -> io::Result<()> {
// NOTE(review): the client id says "add__simple_file" while the test is
// about ignored files — confirm which name is intended.
let mut client = ClientTest::new("add__simple_file").init();
client.add_ignore_rule("foo");
client.add_file("foo", "foo")?;
let mut indexer = Indexer::new(client.get_config().get_root_unsafe());
// A plain `add` must not stage an ignored file.
client.exec_ok("add foo");
let _ = indexer.load();
indexed_expected(&indexer, vec![]);
// `-f` forces staging of ignored files.
client.exec_ok("add foo -f");
let _ = indexer.load();
indexed_expected(&indexer, vec!["foo"]);
client.ok()
}
// add double globbing
// add all
// add folders
// add part of folders
// add all folder
// add automatic ignored if is tracked

167
tests/common/client.rs Normal file
View File

@@ -0,0 +1,167 @@
use nextsync::config::config::Config;
use rand::{distributions::Alphanumeric, Rng};
use std::env;
use std::fs::{self, File, OpenOptions};
use std::io::{self, Write};
use std::path::PathBuf;
use std::process::{Command, Output};
use std::str;
use std::sync::OnceLock;
// Absolute path of the nextsync executable
static EXE_PATH: OnceLock<PathBuf> = OnceLock::new();
/// Handle on an isolated test repository under /tmp/nextsync/<test_id>.
pub struct ClientTest {
volume: String, // temp dir for the test
pub test_id: String, // name of the test (e.g nextsync_rand)
}
/// Four random alphanumeric characters used to make test ids unique
/// across concurrent runs.
pub fn get_random_test_id() -> String {
rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(4)
.map(char::from)
.collect()
}
impl ClientTest {
/// Create a fresh test client: resolves the nextsync binary once,
/// derives a unique test id from `id`, and (re)creates an empty
/// working directory under /tmp which becomes the current dir.
pub fn new(id: &str) -> Self {
let _ = EXE_PATH.get_or_init(|| {
// assumes tests run from the crate root — TODO confirm for workspaces
let mut exe_path = env::current_dir().unwrap();
exe_path = exe_path.join("target/debug/nextsync");
exe_path
});
let mut test_id = id.to_string();
test_id.push_str("_");
test_id.push_str(&get_random_test_id());
// create a directory in /tmp with the given id
let mut vol = String::from("/tmp/nextsync/");
vol.push_str(&test_id);
let _ = fs::remove_dir_all(&vol);
let _ = fs::create_dir_all(&vol);
// Setup the current dir to the local repo
env::set_current_dir(&vol).unwrap();
// build the client
ClientTest {
volume: vol,
test_id,
}
}
pub fn init(mut self) -> Self {
self.exec_ok("init");
println!("========== {} ========== ", &self.test_id);
// set remote url
// let url = String::from(format!("{}@nextcloud.local/{}", self.user, self.test_id));
// self.run_cmd_ok(&format!("remote add origin {}", url));
// // set force_unsecure as debug server has not certificate
// self.run_cmd_ok("config set force_insecure true");
// // set token for request
// self.run_cmd_ok(&format!("credential add {} {}", self.user, self.user));
self
}
pub fn get_config(&self) -> Config {
Config::from(Some(&self.volume))
}
pub fn new_config(&self, path: &str) -> Config {
let mut full_path = self.volume.clone();
full_path.push_str("/");
full_path.push_str(path);
Config::from(Some(&full_path))
}
pub fn set_execution_path(&self, path: &str) {
let mut new_execution_path = self.volume.clone();
new_execution_path.push_str("/");
new_execution_path.push_str(path);
env::set_current_dir(new_execution_path).unwrap();
}
pub fn ok(self) -> io::Result<()> {
fs::remove_dir_all(&self.volume)?;
Ok(())
}
pub fn exec_ok(&mut self, args: &str) -> Output {
let output = self.exec(args);
if !output.status.success() {
println!("id: {}", self.test_id.clone());
println!("Failed to execute: '{}'", args);
println!("stderr: {}", String::from_utf8_lossy(&output.stderr));
println!("stdout: {}", String::from_utf8_lossy(&output.stdout));
}
assert!(output.status.success());
output
}
pub fn exec(&mut self, args: &str) -> Output {
let output = Command::new(EXE_PATH.get().unwrap().to_str().unwrap())
.current_dir(self.volume.clone())
.args(args.split(" "))
.output()
.expect("Could not execute nextsync command");
return output;
}
pub fn add_dir(&mut self, name: &str) -> std::io::Result<()> {
let mut path = self.volume.clone();
path.push_str("/");
path.push_str(name);
let _ = fs::create_dir_all(path)?;
Ok(())
}
pub fn add_file(&mut self, name: &str, content: &str) -> std::io::Result<()> {
let mut path = self.volume.clone();
path.push_str("/");
path.push_str(name);
let mut file = File::create(path)?;
file.write_all(content.as_bytes())?;
Ok(())
}
pub fn remove_file(&mut self, name: &str) -> std::io::Result<()> {
let mut path = self.volume.clone();
path.push_str("/");
path.push_str(name);
fs::remove_file(path)?;
Ok(())
}
pub fn remove_dir(&mut self, name: &str) -> std::io::Result<()> {
let mut path = self.volume.clone();
path.push_str("/");
path.push_str(name);
fs::remove_dir_all(path)?;
Ok(())
}
pub fn add_ignore_rule(&self, rule: &str) {
let mut nsignore_path = self.volume.clone();
nsignore_path.push_str("/.nsignore");
let mut file = OpenOptions::new()
.write(true)
.create(true)
.append(true)
.open(nsignore_path)
.unwrap();
let _ = writeln!(file, "{rule}").unwrap();
}
// pub fn has_file(&mut self, file: &str, content: &str) -> bool {
// let full_path = PathBuf::from(self.volume.clone()).join(file);
// // has_files(full_path, file, content, self.test_id.clone())
// }
}

2
tests/common/mod.rs Normal file
View File

@@ -0,0 +1,2 @@
// Shared helpers for the integration-test binaries.
pub mod client;
pub mod utils;

0
tests/common/utils.rs Normal file
View File

4
tests/reset_test.rs Normal file
View File

@@ -0,0 +1,4 @@
// reset all
// reset file
// reset folder
// reset unknown

1
tests/status/main.rs Normal file
View File

@@ -0,0 +1 @@
mod status_test;

View File

@@ -0,0 +1,14 @@
// #[cfg(test)]
// mod status_tests {
// #[test]
// fn status_test() {
// let client = ClientTest::new("").init();
// }
// // basic add
// // add all folder
// // add all folder current
// // ../folder/file add
// // add part of folder
// }

135
tests/status_new_test.rs Normal file
View File

@@ -0,0 +1,135 @@
mod common;
use common::client::ClientTest;
use nextsync::store::object::Obj;
use nextsync::commands::status::{get_obj_changes, StatusArgs};
use nextsync::config::config::Config;
use std::io;
use std::path::PathBuf;
const DEFAULT_STATUS_ARG: StatusArgs = StatusArgs { nostyle: false };
/// Asserts that every path in `vec2` appears among the env-relative paths
/// of the objects in `vec1`; panics listing the actual paths otherwise.
fn compare_vect(vec1: Vec<Obj>, vec2: Vec<&str>, config: &Config) {
    // Convert once up front: the original recomputed every object's
    // relative path for each expected entry (and again for the message).
    let actual: Vec<PathBuf> = vec1
        .iter()
        .map(|e| e.get_env_relative_path(config.get_root_unsafe()))
        .collect();
    for obj in vec2 {
        assert!(
            actual.contains(&PathBuf::from(obj)),
            "missing {:?} in {:?}",
            obj,
            actual
        );
    }
}
/// Computes the current status and checks the staged / not-staged sets
/// match the expected path lists: same cardinality, same members.
fn status_expected(config: &Config, staged: Vec<&str>, not_staged: Vec<&str>) {
    let changes = get_obj_changes(config);
    assert_eq!(changes.staged.len(), staged.len());
    assert_eq!(changes.not_staged.len(), not_staged.len());
    compare_vect(changes.staged, staged, config);
    compare_vect(changes.not_staged, not_staged, config);
}
#[test]
fn simple_file() -> io::Result<()> {
    // A fresh file starts out not staged; `add` moves it to staged.
    let mut client = ClientTest::new("status__simple_file").init();

    client.add_file("foo", "foo")?;
    status_expected(&client.get_config(), Vec::new(), vec!["foo"]);

    client.exec_ok("add foo");
    status_expected(&client.get_config(), vec!["foo"], Vec::new());

    client.ok()
}
#[test]
fn all_folder() -> io::Result<()> {
    // Adding a directory stages every file underneath it; the sibling
    // file outside the directory stays not staged.
    let mut client = ClientTest::new("status__all_folder").init();

    client.add_dir("dir")?;
    client.add_file("dir/foo", "foo")?;
    client.add_file("dir/bar", "bar")?;
    client.add_file("foo", "foo")?;
    status_expected(&client.get_config(), Vec::new(), vec!["foo", "dir"]);

    client.exec_ok("add dir");
    status_expected(&client.get_config(), vec!["dir/foo", "dir/bar"], vec!["foo"]);

    client.ok()
}
#[test]
fn all_folder_current() -> io::Result<()> {
    // Same scenario as all_folder, but the final status is computed from a
    // config rooted at the sub-directory; the outside file drops out of view.
    let mut client = ClientTest::new("status__all_folder_current").init();

    client.add_dir("dir")?;
    client.add_file("dir/foo", "foo")?;
    client.add_file("dir/bar", "bar")?;
    client.add_file("foor", "foor")?;
    status_expected(&client.get_config(), Vec::new(), vec!["foor", "dir"]);

    client.exec_ok("add dir");
    status_expected(&client.new_config("dir"), vec!["dir/foo", "dir/bar"], Vec::new());

    client.ok()
}
#[test]
fn relative_path() -> io::Result<()> {
    // When executed from inside `dir`, reported paths are relative to the
    // cwd: "foo" instead of "dir/foo", and "../foor" for the parent's file.
    // Fix: the test id was copy-pasted as "status__all_folder_current";
    // it now matches this test so its temp volume is identifiable.
    let mut client = ClientTest::new("status__relative_path").init();
    client.add_dir("dir")?;
    client.add_file("dir/foo", "foo")?;
    client.add_file("dir/bar", "bar")?;
    client.add_file("foor", "foor")?;
    status_expected(&client.get_config(), vec![], vec!["foor", "dir"]);
    client.exec_ok("add dir");
    client.set_execution_path("dir");
    status_expected(
        &client.get_config(),
        vec!["foo", "bar"],
        vec!["../foor"],
    );
    client.ok()
}
#[test]
fn part_of_folder() -> io::Result<()> {
    // Adding a single file inside a directory stages only that file;
    // its sibling remains in the not-staged set.
    let mut client = ClientTest::new("status__part_of_folder").init();

    client.add_dir("dir")?;
    client.add_file("dir/foo", "foo")?;
    client.add_file("dir/bar", "bar")?;
    client.add_file("foor", "foor")?;
    status_expected(&client.get_config(), Vec::new(), vec!["foor", "dir"]);

    client.exec_ok("add dir/foo");
    status_expected(&client.get_config(), vec!["dir/foo"], vec!["foor", "dir/bar"]);

    client.ok()
}
// ../folder/file add
// force add ignored file
// status without ignored file
// all folder without ignored file

5
tests/tests.rs Normal file
View File

@@ -0,0 +1,5 @@
#[cfg(test)]
mod tests {
    // Placeholder test module; the status suite is not wired in yet.
    // mod status;
}

110
tests/utils/client.rs Normal file
View File

@@ -0,0 +1,110 @@
/// Legacy test client struct (superseded by tests/common/client.rs).
/// Fix: `new()` in the impl below initializes a `user` field that this
/// struct never declared, which cannot compile; the field is added here.
#[cfg(test)]
pub struct ClientTest {
    user: String,        // nextcloud user ("admin" in new()) for remote/credential setup
    volume: String,      // temp dir for the test
    pub test_id: String, // name of the test (e.g nextsync_rand)
    exe_path: PathBuf,   // absolute path of nextsync executable
}
// Legacy test client (superseded by tests/common/client.rs).
// NOTE(review): no `use` items are visible in this file and `has_files`
// (called below) is not defined here — this chunk likely does not compile
// as-is; it appears to be kept only for reference.
#[cfg(test)]
impl ClientTest {
    /// Builds a client with a temp volume at /tmp/<id> and the nextsync
    /// binary resolved relative to the current working directory.
    pub fn new(id: String) -> Self {
        // create a directory in /tmp with the given id
        let mut vol = String::from("/tmp/");
        vol.push_str(&id);
        let _ = fs::create_dir(vol.clone());
        // get nextsync path
        let mut exe_path = env::current_dir().unwrap();
        exe_path = exe_path.join("target/debug/nextsync");
        // build the client
        ClientTest {
            user: String::from("admin"),
            volume: vol,
            test_id: id,
            exe_path
        }
    }

    /// Initializes the repo and wires the remote + credentials against a
    /// local debug Nextcloud instance.
    pub fn init(mut self) -> Self {
        self.run_cmd_ok("init");
        // set remote url
        let url = String::from(format!("{}@nextcloud.local/{}", self.user, self.test_id));
        self.run_cmd_ok(&format!("remote add origin {}", url));
        // set force_unsecure as debug server has not certificate
        self.run_cmd_ok("config set force_insecure true");
        // set token for request
        self.run_cmd_ok(&format!("credential add {} {}", self.user, self.user));
        self
    }

    /// Best-effort removal of the temp volume; returns self for chaining.
    pub fn clean(self) -> Self {
        let _ = fs::remove_dir_all(&self.volume);
        self
    }

    /// Runs a nextsync command and asserts it exited successfully,
    /// dumping stdout/stderr first so failures are debuggable.
    pub fn run_cmd_ok(&mut self, args: &str) -> Output {
        let output = self.run_cmd(args);
        if !output.status.success() {
            println!("id: {}", self.test_id.clone());
            println!("Failed to execute: '{}'", args);
            println!("stderr: {}", String::from_utf8_lossy(&output.stderr));
            println!("stdout: {}", String::from_utf8_lossy(&output.stdout));
        }
        assert!(output.status.success());
        output
    }

    /// Runs the nextsync binary with whitespace-split args inside the volume.
    pub fn run_cmd(&mut self, args: &str) -> Output {
        let output = Command::new(self.exe_path.to_str().unwrap())
            .current_dir(self.volume.clone())
            .args(args.split(" "))
            .output()
            .expect("Could not execute nextsync command");
        return output;
    }

    /// Creates `<volume>/<name>` (and any missing parents).
    pub fn add_dir(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        let _ = fs::create_dir_all(path)?;
        Ok(())
    }

    /// Creates `<volume>/<name>` with the given content (truncates if present).
    pub fn add_file(&mut self, name: &str, content: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        let mut file = File::create(path)?;
        file.write_all(content.as_bytes())?;
        Ok(())
    }

    /// Deletes the file `<volume>/<name>`.
    pub fn remove_file(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        fs::remove_file(path)?;
        Ok(())
    }

    /// Recursively deletes the directory `<volume>/<name>`.
    pub fn remove_dir(&mut self, name: &str) -> std::io::Result<()> {
        let mut path = self.volume.clone();
        path.push_str("/");
        path.push_str(name);
        fs::remove_dir_all(path)?;
        Ok(())
    }

    /// Checks `<volume>/<file>` for the given content.
    // NOTE(review): delegates to `has_files`, which is not defined in this
    // file — confirm its origin before reusing this code.
    pub fn has_file(&mut self, file: &str, content: &str) -> bool {
        let full_path = PathBuf::from(self.volume.clone()).join(file);
        has_files(full_path, file, content, self.test_id.clone())
    }
}