diff --git a/.gitignore b/.gitignore index 1952496d..42a0fc28 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ /.env -/data.db* /config.toml -/uploads/ +/data.db* +/data_v2.db* +/target +/uploads/ \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index e7a6994e..0246f562 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -300,6 +300,17 @@ dependencies = [ "num-traits 0.2.14", ] +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + [[package]] name = "autocfg" version = "1.0.1" @@ -444,6 +455,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "colored" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd" +dependencies = [ + "atty", + "lazy_static", + "winapi", +] + [[package]] name = "config" version = "0.11.0" @@ -941,6 +963,15 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.3", +] + [[package]] name = "home" version = "0.5.3" @@ -1545,6 +1576,18 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.3", + "hmac", + "password-hash", + "sha2", +] + [[package]] name = "pem" version = "1.1.0" @@ -2375,6 +2418,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "text-colorizer" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b30f9b94bd367aacc3f62cd28668b10c7ae1784c7d27e223a1c21646221a9166" +dependencies = [ + "colored", +] + [[package]] name = "thiserror" version = "1.0.34" @@ -2577,6 +2629,7 @@ dependencies = [ "futures", "jsonwebtoken", "lettre", + "pbkdf2", "rand_core", "regex", "reqwest", @@ -2588,6 +2641,7 @@ dependencies = [ "serde_json", "sha-1 0.10.0", "sqlx", + "text-colorizer", "tokio", "toml", "urlencoding", diff --git a/Cargo.toml b/Cargo.toml index 4d43f3e7..f16bfeeb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ name = "torrust-index-backend" version = "2.0.0-dev.1" authors = ["Mick van Dijke ", "Wesley Bijleveld "] edition = "2021" +default-run = "main" [profile.dev.package.sqlx-macros] opt-level = 3 @@ -33,3 +34,5 @@ tokio = {version = "1.13", features = ["macros", "io-util", "net", "time", "rt-m lettre = { version = "0.10.0-rc.3", features = ["builder", "tokio1", "tokio1-rustls-tls", "smtp-transport"]} sailfish = "0.4.0" regex = "1.6.0" +pbkdf2 = "0.11.0" +text-colorizer = "1.0.0" diff --git a/build.rs b/build.rs new file mode 100644 index 00000000..d5068697 --- /dev/null +++ b/build.rs @@ -0,0 +1,5 @@ +// generated by `sqlx migrate build-script` +fn main() { + // trigger recompilation when a new migration is added + println!("cargo:rerun-if-changed=migrations"); +} 
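The `pbkdf2` crate added above is what `src/routes/user.rs` (later in this diff) uses to verify PHC-encoded `pbkdf2-sha256` password hashes alongside the existing Argon2 ones. A minimal sketch of that verification flow, assuming the crate's default `simple` feature (which re-exports the `password-hash` traits); `pbkdf2_matches` is a hypothetical helper, not part of this PR:

```rust
use pbkdf2::password_hash::{PasswordHash, PasswordVerifier};
use pbkdf2::Pbkdf2;

/// Returns true when `password` matches a PHC-encoded pbkdf2-sha256 hash,
/// e.g. "$pbkdf2-sha256$i=10000,l=32$<salt>$<hash>".
fn pbkdf2_matches(password: &[u8], phc_string: &str) -> bool {
    match PasswordHash::new(phc_string) {
        // `verify_password` re-hashes `password` with the salt and parameters
        // stored in the PHC string and compares the result with the stored digest.
        Ok(parsed_hash) => Pbkdf2.verify_password(password, &parsed_hash).is_ok(),
        Err(_) => false,
    }
}
```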
diff --git a/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql b/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql new file mode 100644 index 00000000..92949e96 --- /dev/null +++ b/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users CHANGE date_registered date_registered DATETIME DEFAULT NULL \ No newline at end of file diff --git a/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql b/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql new file mode 100644 index 00000000..352a5e8f --- /dev/null +++ b/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users ADD COLUMN date_imported DATETIME DEFAULT NULL \ No newline at end of file diff --git a/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql b/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql new file mode 100644 index 00000000..5757849c --- /dev/null +++ b/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS torrust_users_new ( + user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + date_registered TEXT DEFAULT NULL, + administrator BOOL NOT NULL DEFAULT FALSE +); + +INSERT INTO torrust_users_new SELECT * FROM torrust_users; + +DROP TABLE torrust_users; + +ALTER TABLE torrust_users_new RENAME TO torrust_users \ No newline at end of file diff --git a/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql b/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql new file mode 100644 index 00000000..96dddd2f --- /dev/null +++ b/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users ADD COLUMN date_imported TEXT DEFAULT NULL \ No newline at end of file diff --git a/src/bin/import_tracker_statistics.rs b/src/bin/import_tracker_statistics.rs new file mode 100644 index 00000000..3f8456c4 --- /dev/null +++ b/src/bin/import_tracker_statistics.rs @@ -0,0 +1,10 @@ +//! Import Tracker Statistics command. +//! It imports the number of seeders and leechers for all torrents from the linked tracker. +//! You can execute it with: `cargo run --bin import_tracker_statistics` + +use torrust_index_backend::console::commands::import_tracker_statistics::run_importer; + +#[actix_web::main] +async fn main() { + run_importer().await; +} diff --git a/src/main.rs b/src/bin/main.rs similarity index 100% rename from src/main.rs rename to src/bin/main.rs diff --git a/src/bin/upgrade.rs b/src/bin/upgrade.rs new file mode 100644 index 00000000..874f0fad --- /dev/null +++ b/src/bin/upgrade.rs @@ -0,0 +1,10 @@ +//! Upgrade command. +//! It updates the application from version v1.0.0 to v2.0.0. +//! You can execute it with: `cargo run --bin upgrade ./data.db ./data_v2.db ./uploads` + +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::run_upgrader; + +#[actix_web::main] +async fn main() { + run_upgrader().await; +} diff --git a/src/console/commands/import_tracker_statistics.rs b/src/console/commands/import_tracker_statistics.rs new file mode 100644 index 00000000..f5dba839 --- /dev/null +++ b/src/console/commands/import_tracker_statistics.rs @@ -0,0 +1,86 @@ +//! It imports statistics for all torrents from the linked tracker.
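+//! +//! It fetches the current number of seeders and leechers for each torrent from the tracker API. These statistics are also imported during the normal execution of the program; this command only triggers the import on demand.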
+ +use std::env; +use std::sync::Arc; + +use derive_more::{Display, Error}; +use text_colorizer::*; + +use crate::config::Configuration; +use crate::databases::database::connect_database; +use crate::tracker::TrackerService; + +const NUMBER_OF_ARGUMENTS: usize = 0; + +#[derive(Debug)] +pub struct Arguments {} + +#[derive(Debug, Display, PartialEq, Error)] +#[allow(dead_code)] +pub enum ImportError { + #[display(fmt = "wrong number of arguments")] + WrongNumberOfArgumentsError, +} + +fn parse_args() -> Result { + let args: Vec = env::args().skip(1).collect(); + + if args.len() != NUMBER_OF_ARGUMENTS { + eprintln!( + "{} wrong number of arguments: expected {}, got {}", + "Error".red().bold(), + NUMBER_OF_ARGUMENTS, + args.len() + ); + print_usage(); + return Err(ImportError::WrongNumberOfArgumentsError); + } + + Ok(Arguments {}) +} + +fn print_usage() { + eprintln!( + "{} - imports torrent statistics from the linked tracker. + + cargo run --bin import_tracker_statistics + + ", + "Importer".green() + ); +} + +pub async fn run_importer() { + import(&parse_args().unwrap()).await; +} + +pub async fn import(_args: &Arguments) { + println!("Importing statistics from linked tracker ..."); + + let cfg = match Configuration::load_from_file().await { + Ok(config) => Arc::new(config), + Err(error) => { + panic!("{}", error) + } + }; + + let settings = cfg.settings.read().await; + + let tracker_url = settings.tracker.url.clone(); + + eprintln!("Tracker URL: {}", tracker_url.green()); + + let database = Arc::new( + connect_database(&settings.database.connect_url) + .await + .expect("Database error."), + ); + + let tracker_service = Arc::new(TrackerService::new(cfg.clone(), database.clone())); + + tracker_service.update_torrents().await.unwrap(); +} diff --git a/src/console/commands/mod.rs b/src/console/commands/mod.rs new file mode 100644 index 00000000..6dad4966 --- /dev/null +++ b/src/console/commands/mod.rs @@ -0,0 +1 @@ +pub mod import_tracker_statistics; diff --git a/src/console/mod.rs b/src/console/mod.rs new file mode 100644 index 00000000..82b6da3c --- /dev/null +++ b/src/console/mod.rs @@ -0,0 +1 @@ +pub mod commands; diff --git a/src/lib.rs b/src/lib.rs index 5a0100c3..0d2cc49e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,12 +1,14 @@ pub mod auth; pub mod common; pub mod config; +pub mod console; pub mod databases; pub mod errors; pub mod mailer; pub mod models; pub mod routes; pub mod tracker; +pub mod upgrades; pub mod utils; trait AsCSV { diff --git a/src/models/torrent_file.rs b/src/models/torrent_file.rs index 581d73a8..ff34be5e 100644 --- a/src/models/torrent_file.rs +++ b/src/models/torrent_file.rs @@ -39,6 +39,32 @@ pub struct TorrentInfo { pub root_hash: Option, } +impl TorrentInfo { + /// A torrent file can only hold a `pieces` key or a `root hash` key: + /// http://www.bittorrent.org/beps/bep_0030.html + pub fn get_pieces_as_string(&self) -> String { + match &self.pieces { + None => "".to_string(), + Some(byte_buf) => bytes_to_hex(byte_buf.as_ref()), + } + } + + pub fn get_root_hash_as_i64(&self) -> i64 { + match &self.root_hash { + None => 0i64, + Some(root_hash) => root_hash.parse::().unwrap(), + } + } + + pub fn is_a_single_file_torrent(&self) -> bool { + self.length.is_some() + } + + pub fn is_a_multiple_file_torrent(&self) -> bool { + self.files.is_some() + } +} + #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct Torrent { pub info: TorrentInfo, // @@ -174,6 +200,27 @@ impl
Torrent { } } } + + pub fn announce_urls(&self) -> Vec { + if self.announce_list.is_none() { + return vec![self.announce.clone().unwrap()]; + } + + self.announce_list + .clone() + .unwrap() + .into_iter() + .flatten() + .collect::>() + } + + pub fn is_a_single_file_torrent(&self) -> bool { + self.info.is_a_single_file_torrent() + } + + pub fn is_a_multiple_file_torrent(&self) -> bool { + self.info.is_a_multiple_file_torrent() + } } #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] diff --git a/src/models/tracker_key.rs b/src/models/tracker_key.rs index 71bf51c3..b1baea72 100644 --- a/src/models/tracker_key.rs +++ b/src/models/tracker_key.rs @@ -6,3 +6,15 @@ pub struct TrackerKey { pub key: String, pub valid_until: i64, } + +#[derive(Debug, Serialize, Deserialize)] +pub struct NewTrackerKey { + pub key: String, + pub valid_until: Duration, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Duration { + pub secs: i64, + pub nanos: i64, +} diff --git a/src/models/user.rs b/src/models/user.rs index f64b88b4..9a500d4d 100644 --- a/src/models/user.rs +++ b/src/models/user.rs @@ -3,7 +3,8 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct User { pub user_id: i64, - pub date_registered: String, + pub date_registered: Option, + pub date_imported: Option, pub administrator: bool, } @@ -33,7 +34,8 @@ pub struct UserCompact { #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct UserFull { pub user_id: i64, - pub date_registered: String, + pub date_registered: Option, + pub date_imported: Option, pub administrator: bool, pub username: String, pub email: String, diff --git a/src/routes/user.rs b/src/routes/user.rs index 6b535bc6..df9a385a 100644 --- a/src/routes/user.rs +++ b/src/routes/user.rs @@ -2,6 +2,7 @@ use actix_web::{web, HttpRequest, HttpResponse, Responder}; use argon2::password_hash::SaltString; use argon2::{Argon2, PasswordHash, PasswordHasher, PasswordVerifier}; use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation}; +use pbkdf2::Pbkdf2; use rand_core::OsRng; use serde::{Deserialize, Serialize}; @@ -10,6 +11,7 @@ use crate::config::EmailOnSignup; use crate::errors::{ServiceError, ServiceResult}; use crate::mailer::VerifyClaims; use crate::models::response::{OkResponse, TokenResponse}; +use crate::models::user::UserAuthentication; use crate::utils::regex::validate_email_address; use crate::utils::time::current_time; @@ -139,16 +141,7 @@ pub async fn login(payload: web::Json, app_data: WebAppData) -> ServiceRe .await .map_err(|_| ServiceError::InternalServerError)?; - // wrap string of the hashed password into a PasswordHash struct for verification - let parsed_hash = PasswordHash::new(&user_authentication.password_hash)?; - - // verify if the user supplied and the database supplied passwords match - if Argon2::default() - .verify_password(payload.password.as_bytes(), &parsed_hash) - .is_err() - { - return Err(ServiceError::WrongPasswordOrUsername); - } + verify_password(payload.password.as_bytes(), &user_authentication)?; let settings = app_data.cfg.settings.read().await; @@ -174,6 +167,30 @@ pub async fn login(payload: web::Json, app_data: WebAppData) -> ServiceRe })) } +/// Verify if the user supplied and the database supplied passwords match +pub fn verify_password(password: &[u8], user_authentication: &UserAuthentication) -> Result<(), ServiceError> { + // wrap string of the hashed password into a PasswordHash struct for verification + let parsed_hash = 
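/* PHC-format hash string: `$argon2id$...` for passwords created by this application or, presumably for hashes imported from a v1 database, `$pbkdf2-sha256$...` */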
PasswordHash::new(&user_authentication.password_hash)?; + + match parsed_hash.algorithm.as_str() { + "argon2id" => { + if Argon2::default().verify_password(password, &parsed_hash).is_err() { + return Err(ServiceError::WrongPasswordOrUsername); + } + + Ok(()) + } + "pbkdf2-sha256" => { + if Pbkdf2.verify_password(password, &parsed_hash).is_err() { + return Err(ServiceError::WrongPasswordOrUsername); + } + + Ok(()) + } + _ => Err(ServiceError::WrongPasswordOrUsername), + } +} + pub async fn verify_token(payload: web::Json, app_data: WebAppData) -> ServiceResult { // verify if token is valid let _claims = app_data.auth.verify_jwt(&payload.token).await?; @@ -262,3 +279,37 @@ pub async fn ban_user(req: HttpRequest, app_data: WebAppData) -> ServiceResult Arc { + let source_database_connect_url = format!("sqlite://{}?mode=ro", db_filename); + Arc::new(SqliteDatabaseV1_0_0::new(&source_database_connect_url).await) +} + +pub async fn new_db(db_filename: &str) -> Arc { + let target_database_connect_url = format!("sqlite://{}?mode=rwc", db_filename); + Arc::new(SqliteDatabaseV2_0_0::new(&target_database_connect_url).await) +} + +pub async fn migrate_target_database(target_database: Arc) { + println!("Running migrations in the target database..."); + target_database.migrate().await; +} + +pub async fn reset_target_database(target_database: Arc) { + println!("Truncating all tables in target database ..."); + target_database + .delete_all_database_rows() + .await + .expect("Can't reset the target database."); +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs new file mode 100644 index 00000000..1f4987c6 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -0,0 +1,104 @@ +use serde::{Deserialize, Serialize}; +use sqlx::sqlite::SqlitePoolOptions; +use sqlx::{query_as, SqlitePool}; + +use crate::databases::database::DatabaseError; + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct CategoryRecordV1 { + pub category_id: i64, + pub name: String, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, Clone)] +pub struct UserRecordV1 { + pub user_id: i64, + pub username: String, + pub email: String, + pub email_verified: bool, + pub password: String, + pub administrator: bool, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TrackerKeyRecordV1 { + pub key_id: i64, + pub user_id: i64, + pub key: String, + pub valid_until: i64, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TorrentRecordV1 { + pub torrent_id: i64, + pub uploader: String, + pub info_hash: String, + pub title: String, + pub category_id: i64, + pub description: Option, + pub upload_date: i64, + pub file_size: i64, + pub seeders: i64, + pub leechers: i64, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TorrentFileRecordV1 { + pub file_id: i64, + pub torrent_uid: i64, + pub number: i64, + pub path: String, + pub length: i64, +} + +pub struct SqliteDatabaseV1_0_0 { + pub pool: SqlitePool, +} + +impl SqliteDatabaseV1_0_0 { + pub async fn new(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } + + pub async fn get_categories_order_by_id(&self) -> Result, DatabaseError> { + query_as::<_, CategoryRecordV1>("SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC") + .fetch_all(&self.pool) + .await + 
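// Collapse any sqlx error into the generic `DatabaseError::Error` variant. +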
.map_err(|_| DatabaseError::Error) + } + + pub async fn get_users(&self) -> Result, sqlx::Error> { + query_as::<_, UserRecordV1>("SELECT * FROM torrust_users ORDER BY user_id ASC") + .fetch_all(&self.pool) + .await + } + + pub async fn get_user_by_username(&self, username: &str) -> Result { + query_as::<_, UserRecordV1>("SELECT * FROM torrust_users WHERE username = ?") + .bind(username) + .fetch_one(&self.pool) + .await + } + + pub async fn get_tracker_keys(&self) -> Result, sqlx::Error> { + query_as::<_, TrackerKeyRecordV1>("SELECT * FROM torrust_tracker_keys ORDER BY key_id ASC") + .fetch_all(&self.pool) + .await + } + + pub async fn get_torrents(&self) -> Result, sqlx::Error> { + query_as::<_, TorrentRecordV1>("SELECT * FROM torrust_torrents ORDER BY torrent_id ASC") + .fetch_all(&self.pool) + .await + } + + pub async fn get_torrent_files(&self) -> Result, sqlx::Error> { + query_as::<_, TorrentFileRecordV1>("SELECT * FROM torrust_torrent_files ORDER BY file_id ASC") + .fetch_all(&self.pool) + .await + } +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs new file mode 100644 index 00000000..35207ad4 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -0,0 +1,290 @@ +use chrono::{DateTime, NaiveDateTime, Utc}; +use serde::{Deserialize, Serialize}; +use sqlx::sqlite::{SqlitePoolOptions, SqliteQueryResult}; +use sqlx::{query, query_as, SqlitePool}; + +use super::sqlite_v1_0_0::{TorrentRecordV1, UserRecordV1}; +use crate::databases::database::DatabaseError; +use crate::models::torrent_file::{TorrentFile, TorrentInfo}; + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct CategoryRecordV2 { + pub category_id: i64, + pub name: String, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TorrentRecordV2 { + pub torrent_id: i64, + pub uploader_id: i64, + pub category_id: i64, + pub info_hash: String, + pub size: i64, + pub name: String, + pub pieces: String, + pub piece_length: i64, + pub private: Option, + pub root_hash: i64, + pub date_uploaded: String, +} + +impl TorrentRecordV2 { + pub fn from_v1_data(torrent: &TorrentRecordV1, torrent_info: &TorrentInfo, uploader: &UserRecordV1) -> Self { + Self { + torrent_id: torrent.torrent_id, + uploader_id: uploader.user_id, + category_id: torrent.category_id, + info_hash: torrent.info_hash.clone(), + size: torrent.file_size, + name: torrent_info.name.clone(), + pieces: torrent_info.get_pieces_as_string(), + piece_length: torrent_info.piece_length, + private: torrent_info.private, + root_hash: torrent_info.get_root_hash_as_i64(), + date_uploaded: convert_timestamp_to_datetime(torrent.upload_date), + } + } +} + +pub fn convert_timestamp_to_datetime(timestamp: i64) -> String { + // The expected format in database is: 2022-11-04 09:53:57 + // MySQL uses a DATETIME column and SQLite uses a TEXT column. 
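+ // For example, the timestamp 1667546358 converts to "2022-11-04 07:19:18".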
+ + let naive_datetime = NaiveDateTime::from_timestamp(timestamp, 0); + let datetime_again: DateTime = DateTime::from_utc(naive_datetime, Utc); + + // Format without timezone + datetime_again.format("%Y-%m-%d %H:%M:%S").to_string() +} + +pub struct SqliteDatabaseV2_0_0 { + pub pool: SqlitePool, +} + +impl SqliteDatabaseV2_0_0 { + pub async fn new(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } + + pub async fn migrate(&self) { + sqlx::migrate!("migrations/sqlite3") + .run(&self.pool) + .await + .expect("Could not run database migrations.") + } + + pub async fn reset_categories_sequence(&self) -> Result { + query("DELETE FROM `sqlite_sequence` WHERE `name` = 'torrust_categories'") + .execute(&self.pool) + .await + .map_err(|_| DatabaseError::Error) + } + + pub async fn get_categories(&self) -> Result, DatabaseError> { + query_as::<_, CategoryRecordV2>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name") + .fetch_all(&self.pool) + .await + .map_err(|_| DatabaseError::Error) + } + + pub async fn insert_category_and_get_id(&self, category_name: &str) -> Result { + query("INSERT INTO torrust_categories (name) VALUES (?)") + .bind(category_name) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + .map_err(|e| match e { + sqlx::Error::Database(err) => { + if err.message().contains("UNIQUE") { + DatabaseError::CategoryAlreadyExists + } else { + DatabaseError::Error + } + } + _ => DatabaseError::Error, + }) + } + + pub async fn insert_category(&self, category: &CategoryRecordV2) -> Result { + query("INSERT INTO torrust_categories (category_id, name) VALUES (?, ?)") + .bind(category.category_id) + .bind(category.name.clone()) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_imported_user(&self, user_id: i64, date_imported: &str, administrator: bool) -> Result { + query("INSERT INTO torrust_users (user_id, date_imported, administrator) VALUES (?, ?, ?)") + .bind(user_id) + .bind(date_imported) + .bind(administrator) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_user_profile( + &self, + user_id: i64, + username: &str, + email: &str, + email_verified: bool, + ) -> Result { + query( + "INSERT INTO torrust_user_profiles (user_id, username, email, email_verified, bio, avatar) VALUES (?, ?, ?, ?, ?, ?)", + ) + .bind(user_id) + .bind(username) + .bind(email) + .bind(email_verified) + .bind("") // `bio` defaults to an empty string (see the upgrader notes) + .bind("") // `avatar` defaults to an empty string (see the upgrader notes) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_user_password_hash(&self, user_id: i64, password_hash: &str) -> Result { + query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") + .bind(user_id) + .bind(password_hash) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_tracker_key( + &self, + tracker_key_id: i64, + user_id: i64, + tracker_key: &str, + date_expiry: i64, + ) -> Result { + query("INSERT INTO torrust_tracker_keys (tracker_key_id, user_id, tracker_key, date_expiry) VALUES (?, ?, ?, ?)") + .bind(tracker_key_id) + .bind(user_id) + .bind(tracker_key) + .bind(date_expiry) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_torrent(&self, torrent: &TorrentRecordV2) -> Result { + query( + " + INSERT INTO
torrust_torrents ( + torrent_id, + uploader_id, + category_id, + info_hash, + size, + name, + pieces, + piece_length, + private, + root_hash, + date_uploaded + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + ) + .bind(torrent.torrent_id) + .bind(torrent.uploader_id) + .bind(torrent.category_id) + .bind(torrent.info_hash.clone()) + .bind(torrent.size) + .bind(torrent.name.clone()) + .bind(torrent.pieces.clone()) + .bind(torrent.piece_length) + .bind(torrent.private.unwrap_or(0)) + .bind(torrent.root_hash) + .bind(torrent.date_uploaded.clone()) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_torrent_file_for_torrent_with_one_file( + &self, + torrent_id: i64, + md5sum: &Option, + length: i64, + ) -> Result { + query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH) VALUES (?, ?, ?)") + .bind(md5sum) + .bind(torrent_id) + .bind(length) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_torrent_file_for_torrent_with_multiple_files( + &self, + torrent: &TorrentRecordV1, + file: &TorrentFile, + ) -> Result { + query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH, PATH) VALUES (?, ?, ?, ?)") + .bind(file.md5sum.clone()) + .bind(torrent.torrent_id) + .bind(file.length) + .bind(file.path.join("/")) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_torrent_info(&self, torrent: &TorrentRecordV1) -> Result { + query("INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, ?)") + .bind(torrent.torrent_id) + .bind(torrent.title.clone()) + .bind(torrent.description.clone()) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_torrent_announce_url(&self, torrent_id: i64, tracker_url: &str) -> Result { + query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") + .bind(torrent_id) + .bind(tracker_url) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { + query("DELETE FROM torrust_categories").execute(&self.pool).await.unwrap(); + + query("DELETE FROM torrust_torrents").execute(&self.pool).await.unwrap(); + + query("DELETE FROM torrust_tracker_keys").execute(&self.pool).await.unwrap(); + + query("DELETE FROM torrust_users").execute(&self.pool).await.unwrap(); + + query("DELETE FROM torrust_user_authentication") + .execute(&self.pool) + .await + .unwrap(); + + query("DELETE FROM torrust_user_bans").execute(&self.pool).await.unwrap(); + + query("DELETE FROM torrust_user_invitations") + .execute(&self.pool) + .await + .unwrap(); + + query("DELETE FROM torrust_user_profiles").execute(&self.pool).await.unwrap(); + + query("DELETE FROM torrust_user_public_keys") + .execute(&self.pool) + .await + .unwrap(); + + Ok(()) + } +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs new file mode 100644 index 00000000..afb35f90 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -0,0 +1,3 @@ +pub mod databases; +pub mod transferrers; +pub mod upgrader; diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs new file mode 100644 index 00000000..f3d83d9b --- /dev/null +++
b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs @@ -0,0 +1,37 @@ +use std::sync::Arc; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{CategoryRecordV2, SqliteDatabaseV2_0_0}; + +pub async fn transfer_categories(source_database: Arc, target_database: Arc) { + println!("Transferring categories ..."); + + let source_categories = source_database.get_categories_order_by_id().await.unwrap(); + println!("[v1] categories: {:?}", &source_categories); + + let result = target_database.reset_categories_sequence().await.unwrap(); + println!("[v2] reset categories sequence result: {:?}", result); + + for cat in &source_categories { + println!("[v2] adding category {:?} with id {:?} ...", &cat.name, &cat.category_id); + let id = target_database + .insert_category(&CategoryRecordV2 { + category_id: cat.category_id, + name: cat.name.clone(), + }) + .await + .unwrap(); + + if id != cat.category_id { + panic!( + "Error copying category {:?} from source DB to the target DB", + &cat.category_id + ); + } + + println!("[v2] category: {:?} {:?} added.", id, &cat.name); + } + + let target_categories = target_database.get_categories().await.unwrap(); + println!("[v2] categories: {:?}", &target_categories); +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs new file mode 100644 index 00000000..94eaac75 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs @@ -0,0 +1,4 @@ +pub mod category_transferrer; +pub mod torrent_transferrer; +pub mod tracker_key_transferrer; +pub mod user_transferrer; diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs new file mode 100644 index 00000000..88a681f0 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs @@ -0,0 +1,191 @@ +use std::sync::Arc; +use std::{error, fs}; + +use crate::models::torrent_file::Torrent; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{SqliteDatabaseV2_0_0, TorrentRecordV2}; +use crate::utils::parse_torrent::decode_torrent; + +pub async fn transfer_torrents( + source_database: Arc, + target_database: Arc, + upload_path: &str, +) { + println!("Transferring torrents ..."); + + // Transfer table `torrust_torrents_files` + + // Although the table `torrust_torrents_files` existed in version v1.0.0, + // it was not used. + + // Transfer table `torrust_torrents` + + let torrents = source_database.get_torrents().await.unwrap(); + + for torrent in &torrents { + // [v2] table torrust_torrents + + println!("[v2][torrust_torrents] adding the torrent: {:?} ...", &torrent.torrent_id); + + let uploader = source_database.get_user_by_username(&torrent.uploader).await.unwrap(); + + if uploader.username != torrent.uploader { + panic!( + "Error copying torrent with id {:?}.
+ Username (`uploader`) in `torrust_torrents` table does not match `username` in `torrust_users` table", + &torrent.torrent_id + ); + } + + let filepath = format!("{}/{}.torrent", upload_path, &torrent.torrent_id); + + let torrent_from_file_result = read_torrent_from_file(&filepath); + + if torrent_from_file_result.is_err() { + panic!("Error: torrent file not found: {:?}", &filepath); + } + + let torrent_from_file = torrent_from_file_result.unwrap(); + + let id = target_database + .insert_torrent(&TorrentRecordV2::from_v1_data(torrent, &torrent_from_file.info, &uploader)) + .await + .unwrap(); + + if id != torrent.torrent_id { + panic!( + "Error copying torrent {:?} from source DB to the target DB", + &torrent.torrent_id + ); + } + + println!("[v2][torrust_torrents] torrent with id {:?} added.", &torrent.torrent_id); + + // [v2] table torrust_torrent_files + + println!("[v2][torrust_torrent_files] adding torrent files"); + + if torrent_from_file.is_a_single_file_torrent() { + // The torrent contains only one file, so: + // - "path" is NULL + // - "md5sum" can be NULL + + println!( + "[v2][torrust_torrent_files][single-file-torrent] adding torrent file {:?} with length {:?} ...", + &torrent_from_file.info.name, &torrent_from_file.info.length, + ); + + let file_id = target_database + .insert_torrent_file_for_torrent_with_one_file( + torrent.torrent_id, + // TODO: it seems md5sum can be None. Why? When? + &torrent_from_file.info.md5sum.clone(), + torrent_from_file.info.length.unwrap(), + ) + .await; + + println!( + "[v2][torrust_torrent_files][single-file-torrent] torrent file insert result: {:?}", + &file_id + ); + } else { + // Multiple files are being shared + let files = torrent_from_file.info.files.as_ref().unwrap(); + + for file in files.iter() { + println!( + "[v2][torrust_torrent_files][multiple-file-torrent] adding torrent file: {:?} ...", + &file + ); + + let file_id = target_database + .insert_torrent_file_for_torrent_with_multiple_files(torrent, file) + .await; + + println!( + "[v2][torrust_torrent_files][multiple-file-torrent] torrent file insert result: {:?}", + &file_id + ); + } + } + + // [v2] table torrust_torrent_info + + println!( + "[v2][torrust_torrent_info] adding the torrent info for torrent id {:?} ...", + &torrent.torrent_id + ); + + let id = target_database.insert_torrent_info(torrent).await; + + println!("[v2][torrust_torrents] torrent info insert result: {:?}.", &id); + + // [v2] table torrust_torrent_announce_urls + + println!( + "[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent id {:?} ...", + &torrent.torrent_id + ); + + if torrent_from_file.announce_list.is_some() { + // BEP-0012. Multiple trackers.
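+ // The `announce-list` key holds a list of tiers, each of which is a list of tracker URLs.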
+ + println!( + "[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", + &torrent.torrent_id + ); + + // flatten the nested vec (this will however remove the tier grouping) + let announce_urls = torrent_from_file + .announce_list + .clone() + .unwrap() + .into_iter() + .flatten() + .collect::>(); + + for tracker_url in announce_urls.iter() { + println!( + "[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", + &torrent.torrent_id + ); + + let announce_url_id = target_database + .insert_torrent_announce_url(torrent.torrent_id, tracker_url) + .await; + + println!( + "[v2][torrust_torrent_announce_urls][announce-list] torrent announce url insert result {:?} ...", + &announce_url_id + ); + } + } else if torrent_from_file.announce.is_some() { + println!( + "[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...", + &torrent.torrent_id + ); + + let announce_url_id = target_database + .insert_torrent_announce_url(torrent.torrent_id, &torrent_from_file.announce.unwrap()) + .await; + + println!( + "[v2][torrust_torrent_announce_urls][announce] torrent announce url insert result {:?} ...", + &announce_url_id + ); + } + } + println!("Torrents transferred"); +} + +pub fn read_torrent_from_file(path: &str) -> Result> { + let contents = match fs::read(path) { + Ok(contents) => contents, + Err(e) => return Err(e.into()), + }; + + match decode_torrent(&contents) { + Ok(torrent) => Ok(torrent), + Err(e) => Err(e), + } +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs new file mode 100644 index 00000000..51c451b0 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs @@ -0,0 +1,43 @@ +use std::sync::Arc; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + +pub async fn transfer_tracker_keys(source_database: Arc, target_database: Arc) { + println!("Transferring tracker keys ..."); + + // Transfer table `torrust_tracker_keys` + + let tracker_keys = source_database.get_tracker_keys().await.unwrap(); + + for tracker_key in &tracker_keys { + // [v2] table torrust_tracker_keys + + println!( + "[v2][torrust_tracker_keys] adding the tracker key with id {:?} ...", + &tracker_key.key_id + ); + + let id = target_database + .insert_tracker_key( + tracker_key.key_id, + tracker_key.user_id, + &tracker_key.key, + tracker_key.valid_until, + ) + .await + .unwrap(); + + if id != tracker_key.key_id { + panic!( + "Error copying tracker key {:?} from source DB to the target DB", + &tracker_key.key_id + ); + } + + println!( + "[v2][torrust_tracker_keys] tracker key with id {:?} added.", + &tracker_key.key_id + ); + } +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs new file mode 100644 index 00000000..76f5ff44 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs @@ -0,0 +1,70 @@ +use std::sync::Arc; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + +pub async fn transfer_users( + source_database: Arc, + target_database: Arc, +
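// Datetime string (ISO 8601, without time zone) that will be stored in the new `date_imported` column for every transferred user. +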
date_imported: &str, +) { + println!("Transferring users ..."); + + // Transfer table `torrust_users` + + let users = source_database.get_users().await.unwrap(); + + for user in &users { + // [v2] table torrust_users + + println!( + "[v2][torrust_users] adding user with username {:?} and id {:?} ...", + &user.username, &user.user_id + ); + + let id = target_database + .insert_imported_user(user.user_id, date_imported, user.administrator) + .await + .unwrap(); + + if id != user.user_id { + panic!("Error copying user {:?} from source DB to the target DB", &user.user_id); + } + + println!("[v2][torrust_users] user: {:?} {:?} added.", &user.user_id, &user.username); + + // [v2] table torrust_user_profiles + + println!( + "[v2][torrust_user_profiles] adding user profile for user with username {:?} and id {:?} ...", + &user.username, &user.user_id + ); + + target_database + .insert_user_profile(user.user_id, &user.username, &user.email, user.email_verified) + .await + .unwrap(); + + println!( + "[v2][torrust_user_profiles] user profile added for user with username {:?} and id {:?}.", + &user.username, &user.user_id + ); + + // [v2] table torrust_user_authentication + + println!( + "[v2][torrust_user_authentication] adding password hash ({:?}) for user id ({:?}) ...", + &user.password, &user.user_id + ); + + target_database + .insert_user_password_hash(user.user_id, &user.password) + .await + .unwrap(); + + println!( + "[v2][torrust_user_authentication] password hash ({:?}) added for user id ({:?}).", + &user.password, &user.user_id + ); + } +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs new file mode 100644 index 00000000..0e18d417 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -0,0 +1,108 @@ +//! It updates the application from version v1.0.0 to v2.0.0. +//! +//! NOTES for `torrust_users` table transfer: +//! +//! - In v2, the table `torrust_users` contains a field `date_registered` that does not exist in v1. +//! We changed that column to allow NULL. We also added the new column `date_imported` with +//! the datetime when the upgrader was executed. +//! +//! NOTES for `torrust_user_profiles` table transfer: +//! +//! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. +//! Empty string is used as default value. + +use std::env; +use std::time::SystemTime; + +use chrono::prelude::{DateTime, Utc}; +use text_colorizer::*; + +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::{current_db, migrate_target_database, new_db, reset_target_database}; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::category_transferrer::transfer_categories; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::transfer_torrents; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::tracker_key_transferrer::transfer_tracker_keys; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::user_transferrer::transfer_users; + +const NUMBER_OF_ARGUMENTS: usize = 3; + +#[derive(Debug)] +pub struct Arguments { + pub source_database_file: String, // The source database in version v1.0.0 we want to migrate + pub target_database_file: String, // The new migrated database in version v2.0.0 + pub upload_path: String, // The relative dir where torrent files are stored +} + +fn print_usage() { + eprintln!( + "{} - migrates data from version v1.0.0 to v2.0.0.
+ + cargo run --bin upgrade SOURCE_DB_FILE TARGET_DB_FILE TORRENT_UPLOAD_DIR + + For example: + + cargo run --bin upgrade ./data.db ./data_v2.db ./uploads + + ", + "Upgrader".green() + ); +} + +fn parse_args() -> Arguments { + let args: Vec = env::args().skip(1).collect(); + + if args.len() != NUMBER_OF_ARGUMENTS { + eprintln!( + "{} wrong number of arguments: expected {}, got {}", + "Error".red().bold(), + NUMBER_OF_ARGUMENTS, + args.len() + ); + print_usage(); + std::process::exit(1); + } + + Arguments { + source_database_file: args[0].clone(), + target_database_file: args[1].clone(), + upload_path: args[2].clone(), + } +} + +pub async fn run_upgrader() { + let now = datetime_iso_8601(); + upgrade(&parse_args(), &now).await; +} + +pub async fn upgrade(args: &Arguments, date_imported: &str) { + // Get connection to the source database (current DB in settings) + let source_database = current_db(&args.source_database_file).await; + + // Get connection to the target database (the new DB we want to migrate the data to) + let target_database = new_db(&args.target_database_file).await; + + println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); + + migrate_target_database(target_database.clone()).await; + reset_target_database(target_database.clone()).await; + + transfer_categories(source_database.clone(), target_database.clone()).await; + transfer_users(source_database.clone(), target_database.clone(), date_imported).await; + transfer_tracker_keys(source_database.clone(), target_database.clone()).await; + transfer_torrents(source_database.clone(), target_database.clone(), &args.upload_path).await; + + println!("Upgrade data from version v1.0.0 to v2.0.0 finished!\n"); + + eprintln!( + "{}\nWe recommend running the command to import torrent statistics for all torrents manually. \ + If you do not, the statistics will be imported anyway during the normal execution of the program. \ + You can import statistics manually with:\n {}", + "SUGGESTION: \n".yellow(), + "cargo run --bin import_tracker_statistics".yellow() + ); +} + +/// Current datetime in ISO8601 without time zone. +/// For example: 2022-11-10 10:35:15 +pub fn datetime_iso_8601() -> String { + let dt: DateTime = SystemTime::now().into(); + format!("{}", dt.format("%Y-%m-%d %H:%M:%S")) +} diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs new file mode 100644 index 00000000..e22b19a7 --- /dev/null +++ b/src/upgrades/mod.rs @@ -0,0 +1 @@ +pub mod from_v1_0_0_to_v2_0_0; diff --git a/tests/README.md b/tests/README.md index 81e9d18a..2cad69c7 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,10 +1,21 @@ -### Running Tests -Torrust requires Docker to run different database systems for testing. [install docker here](https://docs.docker.com/engine/). +# Running Tests + +Torrust requires Docker to run different database systems for testing. [Install docker here](https://docs.docker.com/engine/).
Start the databases with `docker-compose` before running tests: - $ docker-compose up +```sh +docker-compose -f tests/docker-compose.yml up +``` Run all tests using: - $ cargo test +```sh +cargo test +``` + +Connect to the DB using the MySQL client: + +```sh +mysql -h127.0.0.1 -uroot -ppassword torrust-index_test +``` diff --git a/tests/mod.rs b/tests/mod.rs index 22adeb6d..27bea3bd 100644 --- a/tests/mod.rs +++ b/tests/mod.rs @@ -1 +1,2 @@ mod databases; +pub mod upgrades; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql new file mode 100644 index 00000000..c535dfb9 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS torrust_users ( + user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + username VARCHAR(32) NOT NULL UNIQUE, + email VARCHAR(100) NOT NULL UNIQUE, + email_verified BOOLEAN NOT NULL DEFAULT FALSE, + password TEXT NOT NULL +) \ No newline at end of file diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql new file mode 100644 index 00000000..ef6f6865 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS torrust_tracker_keys ( + key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + user_id INTEGER, + key VARCHAR(32) NOT NULL, + valid_until INT(10) NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql new file mode 100644 index 00000000..c88abfe2 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql @@ -0,0 +1,7 @@ +CREATE TABLE torrust_categories ( + category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + name VARCHAR(64) NOT NULL UNIQUE +); + +INSERT INTO torrust_categories (name) VALUES +('movies'), ('tv shows'), ('games'), ('music'), ('software'); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql new file mode 100644 index 00000000..aeb3135a --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS torrust_torrent_files ( + file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + torrent_id INTEGER NOT NULL, + number INTEGER NOT NULL, + path VARCHAR(255) NOT NULL, + length INTEGER NOT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql new file mode 100644 index 00000000..0b574c69 --- /dev/null +++
b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql @@ -0,0 +1,2 @@ +ALTER TABLE torrust_users +ADD COLUMN administrator BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql new file mode 100644 index 00000000..413539a4 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql @@ -0,0 +1,14 @@ +CREATE TABLE IF NOT EXISTS torrust_torrents ( + torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + uploader VARCHAR(32) NOT NULL, + info_hash VARCHAR(20) UNIQUE NOT NULL, + title VARCHAR(256) UNIQUE NOT NULL, + category_id INTEGER NOT NULL, + description TEXT, + upload_date INT(10) NOT NULL, + file_size BIGINT NOT NULL, + seeders INTEGER NOT NULL, + leechers INTEGER NOT NULL, + FOREIGN KEY(uploader) REFERENCES torrust_users(username) ON DELETE CASCADE, + FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql new file mode 100644 index 00000000..b786dcd2 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql @@ -0,0 +1,2 @@ +ALTER TABLE torrust_categories +ADD COLUMN icon VARCHAR(32); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent new file mode 100644 index 00000000..faa30f4c Binary files /dev/null and b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent differ diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent new file mode 100644 index 00000000..a62afbff Binary files /dev/null and b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent differ diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs new file mode 100644 index 00000000..29897ff7 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -0,0 +1,4 @@ +pub mod sqlite_v1_0_0; +pub mod sqlite_v2_0_0; +pub mod transferrer_testers; +pub mod upgrader; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore b/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore new file mode 100644 index 00000000..3997bead --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore @@ -0,0 +1 @@ +*.db \ No newline at end of file diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs new file mode 100644 index 00000000..fa1adc92 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs @@ -0,0 +1,124 @@ +use std::fs; + +use sqlx::sqlite::SqlitePoolOptions; +use sqlx::{query, SqlitePool}; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{ + CategoryRecordV1, TorrentRecordV1, TrackerKeyRecordV1, UserRecordV1, +}; + +pub struct SqliteDatabaseV1_0_0 { + pub pool: SqlitePool, +} + +impl SqliteDatabaseV1_0_0 { + pub async fn db_connection(database_file: &str) -> Self { + let 
connect_url = format!("sqlite://{}?mode=rwc", database_file); + Self::new(&connect_url).await + } + + pub async fn new(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } + + /// Execute migrations for database in version v1.0.0 + pub async fn migrate(&self, fixtures_dir: &str) { + let migrations_dir = format!("{}database/v1.0.0/migrations/", fixtures_dir); + + let migrations = vec![ + "20210831113004_torrust_users.sql", + "20210904135524_torrust_tracker_keys.sql", + "20210905160623_torrust_categories.sql", + "20210907083424_torrust_torrent_files.sql", + "20211208143338_torrust_users.sql", + "20220308083424_torrust_torrents.sql", + "20220308170028_torrust_categories.sql", + ]; + + for migration_file_name in &migrations { + let migration_file_path = format!("{}{}", &migrations_dir, &migration_file_name); + self.run_migration_from_file(&migration_file_path).await; + } + } + + async fn run_migration_from_file(&self, migration_file_path: &str) { + println!("Executing migration: {:?}", migration_file_path); + + let sql = fs::read_to_string(migration_file_path).expect("Should have been able to read the file"); + + let res = sqlx::query(&sql).execute(&self.pool).await; + + println!("Migration result {:?}", res); + } + + pub async fn insert_category(&self, category: &CategoryRecordV1) -> Result { + query("INSERT INTO torrust_categories (category_id, name) VALUES (?, ?)") + .bind(category.category_id) + .bind(category.name.clone()) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn delete_all_categories(&self) -> Result<(), sqlx::Error> { + query("DELETE FROM torrust_categories").execute(&self.pool).await.unwrap(); + Ok(()) + } + + pub async fn insert_user(&self, user: &UserRecordV1) -> Result { + query("INSERT INTO torrust_users (user_id, username, email, email_verified, password, administrator) VALUES (?, ?, ?, ?, ?, ?)") + .bind(user.user_id) + .bind(user.username.clone()) + .bind(user.email.clone()) + .bind(user.email_verified) + .bind(user.password.clone()) + .bind(user.administrator) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_tracker_key(&self, tracker_key: &TrackerKeyRecordV1) -> Result { + query("INSERT INTO torrust_tracker_keys (key_id, user_id, key, valid_until) VALUES (?, ?, ?, ?)") + .bind(tracker_key.key_id) + .bind(tracker_key.user_id) + .bind(tracker_key.key.clone()) + .bind(tracker_key.valid_until) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_torrent(&self, torrent: &TorrentRecordV1) -> Result { + query( + "INSERT INTO torrust_torrents ( + torrent_id, + uploader, + info_hash, + title, + category_id, + description, + upload_date, + file_size, + seeders, + leechers + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + ) + .bind(torrent.torrent_id) + .bind(torrent.uploader.clone()) + .bind(torrent.info_hash.clone()) + .bind(torrent.title.clone()) + .bind(torrent.category_id) + .bind(torrent.description.clone()) + .bind(torrent.upload_date) + .bind(torrent.file_size) + .bind(torrent.seeders) + .bind(torrent.leechers) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs new file mode 100644 index 00000000..8d863c10 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs @@ -0,0 +1,147 @@ 
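+//! Test helper for reading the target (v2) database. Most record structs are redefined here rather than imported from the production code.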
+use serde::{Deserialize, Serialize}; +use sqlx::sqlite::SqlitePoolOptions; +use sqlx::{query_as, SqlitePool}; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::TorrentRecordV2; + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct CategoryRecordV2 { + pub category_id: i64, + pub name: String, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct UserRecordV2 { + pub user_id: i64, + pub date_registered: Option, + pub date_imported: Option, + pub administrator: bool, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct UserProfileRecordV2 { + pub user_id: i64, + pub username: String, + pub email: String, + pub email_verified: bool, + pub bio: Option, + pub avatar: Option, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct UserAuthenticationRecordV2 { + pub user_id: i64, + pub password_hash: String, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TrackerKeyRecordV2 { + pub tracker_key_id: i64, + pub user_id: i64, + pub tracker_key: String, + pub date_expiry: i64, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TorrentInfoRecordV2 { + pub torrent_id: i64, + pub title: String, + pub description: Option, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, PartialEq)] +pub struct TorrentAnnounceUrlV2 { + pub announce_url_id: i64, + pub torrent_id: i64, + pub tracker_url: String, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, PartialEq)] +pub struct TorrentFileV2 { + pub file_id: i64, + pub torrent_id: i64, + pub md5sum: Option, + pub length: i64, + pub path: Option, +} + +pub struct SqliteDatabaseV2_0_0 { + pub pool: SqlitePool, +} + +impl SqliteDatabaseV2_0_0 { + pub async fn db_connection(database_file: &str) -> Self { + let connect_url = format!("sqlite://{}?mode=rwc", database_file); + Self::new(&connect_url).await + } + + pub async fn new(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } + + pub async fn get_category(&self, category_id: i64) -> Result { + query_as::<_, CategoryRecordV2>("SELECT * FROM torrust_categories WHERE category_id = ?") + .bind(category_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_user(&self, user_id: i64) -> Result { + query_as::<_, UserRecordV2>("SELECT * FROM torrust_users WHERE user_id = ?") + .bind(user_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_user_profile(&self, user_id: i64) -> Result { + query_as::<_, UserProfileRecordV2>("SELECT * FROM torrust_user_profiles WHERE user_id = ?") + .bind(user_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_user_authentication(&self, user_id: i64) -> Result { + query_as::<_, UserAuthenticationRecordV2>("SELECT * FROM torrust_user_authentication WHERE user_id = ?") + .bind(user_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_tracker_key(&self, tracker_key_id: i64) -> Result { + query_as::<_, TrackerKeyRecordV2>("SELECT * FROM torrust_tracker_keys WHERE tracker_key_id = ?") + .bind(tracker_key_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_torrent(&self, torrent_id: i64) -> Result { + query_as::<_, TorrentRecordV2>("SELECT * FROM torrust_torrents WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_torrent_info(&self, torrent_id: i64) -> Result { + query_as::<_, TorrentInfoRecordV2>("SELECT * FROM
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/category_transferrer_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/category_transferrer_tester.rs
new file mode 100644
index 00000000..c10f93b8
--- /dev/null
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/category_transferrer_tester.rs
@@ -0,0 +1,62 @@
+use std::sync::Arc;
+
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::CategoryRecordV1;
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+
+pub struct CategoryTester {
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    target_database: Arc<SqliteDatabaseV2_0_0>,
+    test_data: TestData,
+}
+
+pub struct TestData {
+    pub categories: Vec<CategoryRecordV1>,
+}
+
+impl CategoryTester {
+    pub fn new(source_database: Arc<SqliteDatabaseV1_0_0>, target_database: Arc<SqliteDatabaseV2_0_0>) -> Self {
+        let category_01 = CategoryRecordV1 {
+            category_id: 10,
+            name: "category name 10".to_string(),
+        };
+        let category_02 = CategoryRecordV1 {
+            category_id: 11,
+            name: "category name 11".to_string(),
+        };
+
+        Self {
+            source_database,
+            target_database,
+            test_data: TestData {
+                categories: vec![category_01, category_02],
+            },
+        }
+    }
+
+    pub fn get_valid_category_id(&self) -> i64 {
+        self.test_data.categories[0].category_id
+    }
+
+    /// Table `torrust_categories`
+    pub async fn load_data_into_source_db(&self) {
+        // Delete categories added by migrations
+        self.source_database.delete_all_categories().await.unwrap();
+
+        // Add test categories
+        for category in &self.test_data.categories {
+            self.source_database.insert_category(category).await.unwrap();
+        }
+    }
+
+    /// Table `torrust_categories`
+    pub async fn assert_data_in_target_db(&self) {
+        for category in &self.test_data.categories {
+            let imported_category = self.target_database.get_category(category.category_id).await.unwrap();
+
+            assert_eq!(imported_category.category_id, category.category_id);
+            assert_eq!(imported_category.name, category.name);
+        }
+    }
+}
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs
new file mode 100644
index 00000000..459bcac8
--- /dev/null
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs
@@ -0,0 +1,4 @@
+pub mod category_transferrer_tester;
+pub mod torrent_transferrer_tester;
+pub mod tracker_key_transferrer_tester;
+pub mod user_transferrer_tester;
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/torrent_transferrer_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/torrent_transferrer_tester.rs
new file mode 100644
index 00000000..86bd1e52
--- /dev/null
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/torrent_transferrer_tester.rs
@@ -0,0 +1,181 @@
+use std::sync::Arc;
+
+use torrust_index_backend::models::torrent_file::Torrent;
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{TorrentRecordV1, UserRecordV1};
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::convert_timestamp_to_datetime;
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::read_torrent_from_file;
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+
+pub struct TorrentTester {
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    target_database: Arc<SqliteDatabaseV2_0_0>,
+    test_data: TestData,
+}
+
+pub struct TestData {
+    pub torrents: Vec<TorrentRecordV1>,
+    pub user: UserRecordV1,
+}
+
+impl TorrentTester {
+    pub fn new(
+        source_database: Arc<SqliteDatabaseV1_0_0>,
+        target_database: Arc<SqliteDatabaseV2_0_0>,
+        user: &UserRecordV1,
+        category_id: i64,
+    ) -> Self {
+        let torrent_01 = TorrentRecordV1 {
+            torrent_id: 1,
+            uploader: user.username.clone(),
+            info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(),
+            title: "A Mandelbrot Set 2048x2048px picture".to_string(),
+            category_id,
+            description: Some(
+                "A beautiful Mandelbrot Set picture in black and white. \n \
+                - Hybrid torrent V1 and V2. \n \
+                - Single-file torrent. \n \
+                - Public. \n \
+                - More than one tracker URL. \n \
+                "
+                .to_string(),
+            ),
+            upload_date: 1667546358, // 2022-11-04 07:19:18
+            file_size: 9219566,
+            seeders: 0,
+            leechers: 0,
+        };
+        let torrent_02 = TorrentRecordV1 {
+            torrent_id: 2,
+            uploader: user.username.clone(),
+            info_hash: "0902d375f18ec020f0cc68ed4810023032ba81cb".to_string(),
+            title: "Two Mandelbrot Set 2048x2048px pictures".to_string(),
+            category_id,
+            description: Some(
+                "Two beautiful Mandelbrot Set pictures in black and white. \n \
+                - Hybrid torrent V1 and V2. \n \
+                - Multiple-files torrent. \n \
+                - Private. \n \
+                - Only one tracker URL. \n \
+                "
+                .to_string(),
+            ),
+            upload_date: 1667546358, // 2022-11-04 07:19:18
+            file_size: 9219566,
+            seeders: 0,
+            leechers: 0,
+        };
+
+        Self {
+            source_database,
+            target_database,
+            test_data: TestData {
+                torrents: vec![torrent_01, torrent_02],
+                user: user.clone(),
+            },
+        }
+    }
+
+    pub async fn load_data_into_source_db(&self) {
+        for torrent in &self.test_data.torrents {
+            self.source_database.insert_torrent(torrent).await.unwrap();
+        }
+    }
+
+    pub async fn assert_data_in_target_db(&self, upload_path: &str) {
+        for torrent in &self.test_data.torrents {
+            let filepath = self.torrent_file_path(upload_path, torrent.torrent_id);
+
+            let torrent_file = read_torrent_from_file(&filepath).unwrap();
+
+            self.assert_torrent(torrent, &torrent_file).await;
+            self.assert_torrent_info(torrent).await;
+            self.assert_torrent_announce_urls(torrent, &torrent_file).await;
+            self.assert_torrent_files(torrent, &torrent_file).await;
+        }
+    }
+
+    pub fn torrent_file_path(&self, upload_path: &str, torrent_id: i64) -> String {
+        format!("{}/{}.torrent", &upload_path, &torrent_id)
+    }
+
+    /// Table `torrust_torrents`
+    async fn assert_torrent(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) {
+        let imported_torrent = self.target_database.get_torrent(torrent.torrent_id).await.unwrap();
+
+        assert_eq!(imported_torrent.torrent_id, torrent.torrent_id);
+        assert_eq!(imported_torrent.uploader_id, self.test_data.user.user_id);
+        assert_eq!(imported_torrent.category_id, torrent.category_id);
+        assert_eq!(imported_torrent.info_hash, torrent.info_hash);
+        assert_eq!(imported_torrent.size, torrent.file_size);
+        assert_eq!(imported_torrent.name, torrent_file.info.name);
+        assert_eq!(imported_torrent.pieces, torrent_file.info.get_pieces_as_string());
+        assert_eq!(imported_torrent.piece_length, torrent_file.info.piece_length);
+        // Torrents without a `private` flag are imported as `private = 0`.
+        if torrent_file.info.private.is_none() {
+            assert_eq!(imported_torrent.private, Some(0));
+        } else {
+            assert_eq!(imported_torrent.private, torrent_file.info.private);
+        }
+        assert_eq!(imported_torrent.root_hash, torrent_file.info.get_root_hash_as_i64());
+        assert_eq!(
+            imported_torrent.date_uploaded,
+            convert_timestamp_to_datetime(torrent.upload_date)
+        );
+    }
+
+    /// Table `torrust_torrent_info`
+    async fn assert_torrent_info(&self, torrent: &TorrentRecordV1) {
+        let torrent_info = self.target_database.get_torrent_info(torrent.torrent_id).await.unwrap();
+
+        assert_eq!(torrent_info.torrent_id, torrent.torrent_id);
+        assert_eq!(torrent_info.title, torrent.title);
+        assert_eq!(torrent_info.description, torrent.description);
+    }
+
+    /// Table `torrust_torrent_announce_urls`
+    async fn assert_torrent_announce_urls(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) {
+        let torrent_announce_urls = self
+            .target_database
+            .get_torrent_announce_urls(torrent.torrent_id)
+            .await
+            .unwrap();
+
+        let urls: Vec<String> = torrent_announce_urls
+            .iter()
+            .map(|torrent_announce_url| torrent_announce_url.tracker_url.to_string())
+            .collect();
+
+        let expected_urls = torrent_file.announce_urls();
+
+        assert_eq!(urls, expected_urls);
+    }
+
+    /// Table `torrust_torrent_files`
+    async fn assert_torrent_files(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) {
+        let db_torrent_files = self.target_database.get_torrent_files(torrent.torrent_id).await.unwrap();
+
+        if torrent_file.is_a_single_file_torrent() {
+            let db_torrent_file = &db_torrent_files[0];
+            assert_eq!(db_torrent_file.torrent_id, torrent.torrent_id);
+            assert!(db_torrent_file.md5sum.is_none());
+            assert_eq!(db_torrent_file.length, torrent_file.info.length.unwrap());
+            assert!(db_torrent_file.path.is_none());
+        } else {
+            let files = torrent_file.info.files.as_ref().unwrap();
+
+            // Files in torrent file
+            for file in files.iter() {
+                let file_path = file.path.join("/");
+
+                // Find file in database
+                let db_torrent_file = db_torrent_files.iter().find(|&f| f.path == Some(file_path.clone())).unwrap();
+
+                assert_eq!(db_torrent_file.torrent_id, torrent.torrent_id);
+                assert!(db_torrent_file.md5sum.is_none());
+                assert_eq!(db_torrent_file.length, file.length);
+                assert_eq!(db_torrent_file.path, Some(file_path));
+            }
+        }
+    }
+}
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/tracker_key_transferrer_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/tracker_key_transferrer_tester.rs
new file mode 100644
index 00000000..e50ac861
--- /dev/null
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/tracker_key_transferrer_tester.rs
@@ -0,0 +1,54 @@
+use std::sync::Arc;
+
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::TrackerKeyRecordV1;
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+
+pub struct TrackerKeyTester {
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    target_database: Arc<SqliteDatabaseV2_0_0>,
+    test_data: TestData,
+}
+
+pub struct TestData {
+    pub tracker_key: TrackerKeyRecordV1,
+}
+
+impl TrackerKeyTester {
+    pub fn new(source_database: Arc<SqliteDatabaseV1_0_0>, target_database: Arc<SqliteDatabaseV2_0_0>, user_id: i64) -> Self {
+        let tracker_key = TrackerKeyRecordV1 {
+            key_id: 1,
+            user_id,
+            key: "rRstSTM5rx0sgxjLkRSJf3rXODcRBI5T".to_string(),
+            valid_until: 2456956800, // 2047-11-10 00:00:00 UTC
+        };
+
+        Self {
+            source_database,
+            target_database,
+            test_data: TestData { tracker_key },
+        }
+    }
+
+    pub async fn load_data_into_source_db(&self) {
+        self.source_database
+            .insert_tracker_key(&self.test_data.tracker_key)
+            .await
+            .unwrap();
+    }
+
+    /// Table `torrust_tracker_keys`
+    pub async fn assert_data_in_target_db(&self) {
+        let imported_key = self
+            .target_database
+            .get_tracker_key(self.test_data.tracker_key.key_id)
+            .await
+            .unwrap();
+
+        assert_eq!(imported_key.tracker_key_id, self.test_data.tracker_key.key_id);
+        assert_eq!(imported_key.user_id, self.test_data.tracker_key.user_id);
+        assert_eq!(imported_key.tracker_key, self.test_data.tracker_key.key);
+        assert_eq!(imported_key.date_expiry, self.test_data.tracker_key.valid_until);
+    }
+}
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/user_transferrer_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/user_transferrer_tester.rs
new file mode 100644
index 00000000..2d52a683
--- /dev/null
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/user_transferrer_tester.rs
@@ -0,0 +1,110 @@
+use std::sync::Arc;
+
+use argon2::password_hash::SaltString;
+use argon2::{Argon2, PasswordHasher};
+use rand_core::OsRng;
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1;
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+
+pub struct UserTester {
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    target_database: Arc<SqliteDatabaseV2_0_0>,
+    execution_time: String,
+    pub test_data: TestData,
+}
+
+pub struct TestData {
+    pub user: UserRecordV1,
+}
+
+impl UserTester {
+    pub fn new(
+        source_database: Arc<SqliteDatabaseV1_0_0>,
+        target_database: Arc<SqliteDatabaseV2_0_0>,
+        execution_time: &str,
+    ) -> Self {
+        let user = UserRecordV1 {
+            user_id: 1,
+            username: "user01".to_string(),
+            email: "user01@torrust.com".to_string(),
+            email_verified: true,
+            password: hashed_valid_password(),
+            administrator: true,
+        };
+
+        Self {
+            source_database,
+            target_database,
+            execution_time: execution_time.to_owned(),
+            test_data: TestData { user },
+        }
+    }
+
+    pub async fn load_data_into_source_db(&self) {
+        self.source_database.insert_user(&self.test_data.user).await.unwrap();
+    }
+
+    pub async fn assert_data_in_target_db(&self) {
+        self.assert_user().await;
+        self.assert_user_profile().await;
+        self.assert_user_authentication().await;
+    }
+
+    /// Table `torrust_users`
+    async fn assert_user(&self) {
+        let imported_user = self.target_database.get_user(self.test_data.user.user_id).await.unwrap();
+
+        assert_eq!(imported_user.user_id, self.test_data.user.user_id);
+        assert!(imported_user.date_registered.is_none());
+        assert_eq!(imported_user.date_imported.unwrap(), self.execution_time);
+        assert_eq!(imported_user.administrator, self.test_data.user.administrator);
+    }
+
+    /// Table `torrust_user_profiles`
+    async fn assert_user_profile(&self) {
+        let imported_user_profile = self
+            .target_database
+            .get_user_profile(self.test_data.user.user_id)
+            .await
+            .unwrap();
+
+        assert_eq!(imported_user_profile.user_id, self.test_data.user.user_id);
+        assert_eq!(imported_user_profile.username, self.test_data.user.username);
+        assert_eq!(imported_user_profile.email, self.test_data.user.email);
+        assert_eq!(imported_user_profile.email_verified, self.test_data.user.email_verified);
+        assert!(imported_user_profile.bio.is_none());
+        assert!(imported_user_profile.avatar.is_none());
+    }
+
+    /// Table `torrust_user_authentication`
+    async fn assert_user_authentication(&self) {
+        let imported_user_authentication = self
+            .target_database
+            .get_user_authentication(self.test_data.user.user_id)
+            .await
+            .unwrap();
+
+        assert_eq!(imported_user_authentication.user_id, self.test_data.user.user_id);
+        assert_eq!(imported_user_authentication.password_hash, self.test_data.user.password);
+    }
+}
+
+fn hashed_valid_password() -> String {
+    hash_password(&valid_password())
+}
+
+fn valid_password() -> String {
+    "123456".to_string()
+}
+
+fn hash_password(plain_password: &str) -> String {
+    let salt = SaltString::generate(&mut OsRng);
+
+    // Argon2 with default params (Argon2id v19)
+    let argon2 = Argon2::default();
+
+    // Hash password to PHC string ($argon2id$v=19$...)
+    argon2.hash_password(plain_password.as_bytes(), &salt).unwrap().to_string()
+}
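The tester above asserts that the password hash is transferred verbatim, so after the upgrade the target database holds the same PHC string the fixture generated. A minimal verification sketch for the Argon2 case (the function name and arguments are illustrative; the `argon2` crate's `PasswordVerifier` trait performs the parse-and-compare):

```rust
use argon2::password_hash::{PasswordHash, PasswordVerifier};
use argon2::Argon2;

// Returns true if `plain_password` matches the stored PHC string ($argon2id$v=19$...).
fn verify_argon2(phc_string: &str, plain_password: &str) -> bool {
    let parsed_hash = match PasswordHash::new(phc_string) {
        Ok(hash) => hash,
        Err(_) => return false, // not a valid PHC string
    };

    // Re-hash with the embedded salt and params, then compare.
    Argon2::default()
        .verify_password(plain_password.as_bytes(), &parsed_hash)
        .is_ok()
}
```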
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
new file mode 100644
index 00000000..9e207b22
--- /dev/null
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
@@ -0,0 +1,124 @@
+//! You can run this test with:
+//!
+//! ```text
+//! cargo test upgrades_data_from_version_v1_0_0_to_v2_0_0
+//! ```
+//!
+//! or:
+//!
+//! ```text
+//! cargo test upgrades_data_from_version_v1_0_0_to_v2_0_0 -- --nocapture
+//! ```
+//!
+//! to see the "upgrader" command output.
+use std::fs;
+use std::path::Path;
+use std::sync::Arc;
+
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{datetime_iso_8601, upgrade, Arguments};
+
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::category_transferrer_tester::CategoryTester;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::torrent_transferrer_tester::TorrentTester;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::tracker_key_transferrer_tester::TrackerKeyTester;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::user_transferrer_tester::UserTester;
+
+struct TestConfig {
+    // Directories
+    pub fixtures_dir: String,
+    pub upload_path: String,
+    // Files
+    pub source_database_file: String,
+    pub target_database_file: String,
+}
+
+impl Default for TestConfig {
+    fn default() -> Self {
+        let fixtures_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/".to_string();
+        let upload_path = format!("{}uploads/", &fixtures_dir);
+        let output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string();
+        let source_database_file = format!("{}source.db", output_dir);
+        let target_database_file = format!("{}target.db", output_dir);
+        Self {
+            fixtures_dir,
+            upload_path,
+            source_database_file,
+            target_database_file,
+        }
+    }
+}
+
+#[tokio::test]
+async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() {
+    let config = TestConfig::default();
+
+    let (source_db, target_db) = setup_databases(&config).await;
+
+    // The datetime when the upgrader is executed
+    let execution_time = datetime_iso_8601();
+
+    let category_tester = CategoryTester::new(source_db.clone(), target_db.clone());
+    let user_tester = UserTester::new(source_db.clone(), target_db.clone(), &execution_time);
+    let tracker_key_tester = TrackerKeyTester::new(source_db.clone(), target_db.clone(), user_tester.test_data.user.user_id);
+    let torrent_tester = TorrentTester::new(
+        source_db.clone(),
+        target_db.clone(),
+        &user_tester.test_data.user,
+        category_tester.get_valid_category_id(),
+    );
+
+    // Load data into source database in version v1.0.0
+    category_tester.load_data_into_source_db().await;
+    user_tester.load_data_into_source_db().await;
+    tracker_key_tester.load_data_into_source_db().await;
+    torrent_tester.load_data_into_source_db().await;
+
+    // Run the upgrader
+    upgrade(
+        &Arguments {
+            source_database_file: config.source_database_file.clone(),
+            target_database_file: config.target_database_file.clone(),
+            upload_path: config.upload_path.clone(),
+        },
+        &execution_time,
+    )
+    .await;
+
+    // Assertions for data transferred to the new database in version v2.0.0
+    category_tester.assert_data_in_target_db().await;
+    user_tester.assert_data_in_target_db().await;
+    tracker_key_tester.assert_data_in_target_db().await;
+    torrent_tester.assert_data_in_target_db(&config.upload_path).await;
+}
+
+async fn setup_databases(config: &TestConfig) -> (Arc<SqliteDatabaseV1_0_0>, Arc<SqliteDatabaseV2_0_0>) {
+    // Set up clean source database
+    reset_databases(&config.source_database_file, &config.target_database_file);
+    let source_database = source_db_connection(&config.source_database_file).await;
+    source_database.migrate(&config.fixtures_dir).await;
+
+    // Set up connection for the target database
+    let target_database = target_db_connection(&config.target_database_file).await;
+
+    (source_database, target_database)
+}
+
+async fn source_db_connection(source_database_file: &str) -> Arc<SqliteDatabaseV1_0_0> {
+    Arc::new(SqliteDatabaseV1_0_0::db_connection(source_database_file).await)
+}
+
+async fn target_db_connection(target_database_file: &str) -> Arc<SqliteDatabaseV2_0_0> {
+    Arc::new(SqliteDatabaseV2_0_0::db_connection(target_database_file).await)
+}
+
+/// Reset databases from previous executions
+fn reset_databases(source_database_file: &str, target_database_file: &str) {
+    if Path::new(source_database_file).exists() {
+        fs::remove_file(source_database_file).expect("Can't remove the source DB file.");
+    }
+
+    if Path::new(target_database_file).exists() {
+        fs::remove_file(target_database_file).expect("Can't remove the target DB file.");
+    }
+}
diff --git a/tests/upgrades/mod.rs b/tests/upgrades/mod.rs
new file mode 100644
index 00000000..e22b19a7
--- /dev/null
+++ b/tests/upgrades/mod.rs
@@ -0,0 +1 @@
+pub mod from_v1_0_0_to_v2_0_0;
diff --git a/upgrades/from_v1_0_0_to_v2_0_0/README.md b/upgrades/from_v1_0_0_to_v2_0_0/README.md
new file mode 100644
index 00000000..37609149
--- /dev/null
+++ b/upgrades/from_v1_0_0_to_v2_0_0/README.md
@@ -0,0 +1,34 @@
+# Upgrade from v1.0.0 to v2.0.0
+
+## How-to
+
+To upgrade from version `v1.0.0` to `v2.0.0` you have to follow these steps:
+
+- Back up your current database and the `uploads` folder (see the example below). You can find out which database and uploads folder you are using in the `config.toml` file in the root folder of your installation.
+- Set up a local environment exactly as you have it in production, with your production data (DB and torrents folder).
+- Run the application locally with: `cargo run`.
+- Execute the upgrader command: `cargo run --bin upgrade ./data.db ./data_v2.db ./uploads`
+- A new SQLite file should have been created in the root folder: `data_v2.db`
+- Stop the running application and change the DB configuration to use the newly generated database:
+
+```toml
+[database]
+connect_url = "sqlite://data_v2.db?mode=rwc"
+```
+
+- Run the application again.
+- Perform some tests (see the Tests section below).
+- If all tests pass, stop the production service, replace the DB, and start it again.
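+
+For the backup step, something like the following should be enough with the default SQLite setup (adjust the paths to your installation, and stop the service first so the database file is not being written to while you copy it):
+
+```sh
+cp ./data.db ./data.db.bak
+cp -r ./uploads ./uploads.bak
+```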
+
+## Tests
+
+Before replacing the DB in production you can run some tests, such as:
+
+- Try to log in with a preexisting user. If you do not know any, you can create a new "test" user in production before starting the upgrade process. v1 used a different password hash algorithm, so a preexisting user exercises that path.
+- Try to create a new user.
+- Try to upload and download a new torrent containing a single file (with and without md5sum).
+- Try to upload and download a new torrent containing a folder.
+
+## Notes
+
+The `db_schemas` folder contains snapshots of the source and target database schemas for this upgrade.
diff --git a/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/mysql/db_migrations_v2.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/mysql/db_migrations_v2.sql
new file mode 100644
index 00000000..08349bb5
--- /dev/null
+++ b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/mysql/db_migrations_v2.sql
@@ -0,0 +1,152 @@
+# 20220721205537_torrust_users.sql
+
+CREATE TABLE IF NOT EXISTS torrust_users (
+    user_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    date_registered DATETIME NOT NULL,
+    administrator BOOLEAN NOT NULL DEFAULT FALSE
+);
+
+# 20220721210530_torrust_user_authentication.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_authentication (
+    user_id INTEGER NOT NULL PRIMARY KEY,
+    password_hash TEXT NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220727213942_torrust_user_profiles.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_profiles (
+    user_id INTEGER NOT NULL PRIMARY KEY,
+    username VARCHAR(24) NOT NULL UNIQUE,
+    email VARCHAR(320) UNIQUE,
+    email_verified BOOL NOT NULL DEFAULT FALSE,
+    bio TEXT,
+    avatar TEXT,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220727222313_torrust_tracker_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_tracker_keys (
+    tracker_key_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    user_id INTEGER NOT NULL,
+    tracker_key CHAR(32) NOT NULL,
+    date_expiry BIGINT NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220730102607_torrust_user_public_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_public_keys (
+    public_key_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    user_id INTEGER NOT NULL,
+    public_key CHAR(32) UNIQUE NOT NULL,
+    date_registered DATETIME NOT NULL,
+    date_expiry DATETIME NOT NULL,
+    revoked BOOLEAN NOT NULL DEFAULT FALSE,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220730104552_torrust_user_invitations.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_invitations (
+    invitation_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    user_id INTEGER NOT NULL,
+    public_key CHAR(32) NOT NULL,
+    signed_digest CHAR(32) NOT NULL,
+    date_begin DATETIME NOT NULL,
+    date_expiry DATETIME NOT NULL,
+    max_uses INTEGER NOT NULL,
+    personal_message VARCHAR(512),
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE,
+    FOREIGN KEY(public_key) REFERENCES torrust_user_public_keys(public_key) ON DELETE CASCADE
+);
+
+# 20220730105501_torrust_user_invitation_uses.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_invitation_uses (
+    invitation_use_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    invitation_id INTEGER NOT NULL,
+    registered_user_id INTEGER NOT NULL,
+    date_used DATETIME NOT NULL,
+    FOREIGN KEY(invitation_id) REFERENCES torrust_user_invitations(invitation_id) ON DELETE CASCADE,
+    FOREIGN KEY(registered_user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220801201435_torrust_user_bans.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_bans (
+    ban_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    user_id INTEGER NOT NULL,
+    reason TEXT NOT NULL,
+    date_expiry DATETIME NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220802161524_torrust_categories.sql
+
+CREATE TABLE torrust_categories (
+    category_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    name VARCHAR(64) NOT NULL UNIQUE
+);
+
+INSERT INTO torrust_categories (name) VALUES ('movies'), ('tv shows'), ('games'), ('music'), ('software');
+
+# 20220810192613_torrust_torrents.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrents (
+    torrent_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    uploader_id INTEGER NOT NULL,
+    category_id INTEGER NOT NULL,
+    info_hash CHAR(40) UNIQUE NOT NULL,
+    size BIGINT NOT NULL,
+    name TEXT NOT NULL,
+    pieces LONGTEXT NOT NULL,
+    piece_length BIGINT NOT NULL,
+    private BOOLEAN NULL DEFAULT NULL,
+    root_hash BOOLEAN NOT NULL DEFAULT FALSE,
+    date_uploaded DATETIME NOT NULL,
+    FOREIGN KEY(uploader_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE,
+    FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE
+);
+
+# 20220810201538_torrust_torrent_files.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_files (
+    file_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    torrent_id INTEGER NOT NULL,
+    md5sum TEXT NULL DEFAULT NULL,
+    length BIGINT NOT NULL,
+    path TEXT DEFAULT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+# 20220810201609_torrust_torrent_announce_urls.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_announce_urls (
+    announce_url_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    torrent_id INTEGER NOT NULL,
+    tracker_url VARCHAR(256) NOT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+# 20220812181520_torrust_torrent_info.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_info (
+    torrent_id INTEGER NOT NULL PRIMARY KEY,
+    title VARCHAR(256) UNIQUE NOT NULL,
+    description TEXT DEFAULT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+# 20220812184806_torrust_torrent_tracker_stats.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_tracker_stats (
+    torrent_id INTEGER NOT NULL PRIMARY KEY,
+    tracker_url VARCHAR(256) NOT NULL,
+    seeders INTEGER NOT NULL DEFAULT 0,
+    leechers INTEGER NOT NULL DEFAULT 0,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE,
+    UNIQUE(torrent_id, tracker_url)
+);
diff --git a/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v1.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v1.sql
new file mode 100644
index 00000000..214c4921
--- /dev/null
+++ b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v1.sql
@@ -0,0 +1,68 @@
+-- 20210831113004_torrust_users.sql
+
+CREATE TABLE IF NOT EXISTS torrust_users (
+    user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    username VARCHAR(32) NOT NULL UNIQUE,
+    email VARCHAR(100) NOT NULL UNIQUE,
+    email_verified BOOLEAN NOT NULL DEFAULT FALSE,
+    password TEXT NOT NULL
+);
+
+-- 20210904135524_torrust_tracker_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_tracker_keys (
+    key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER,
+    key VARCHAR(32) NOT NULL,
+    valid_until INT(10) NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id)
+);
+
+-- 20210905160623_torrust_categories.sql
+
+CREATE TABLE torrust_categories (
+    category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    name VARCHAR(64) NOT NULL UNIQUE
+);
+
+INSERT INTO torrust_categories (name) VALUES
+('movies'), ('tv shows'), ('games'), ('music'), ('software');
+
+-- 20210907083424_torrust_torrent_files.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_files (
+    file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    torrent_id INTEGER NOT NULL,
+    number INTEGER NOT NULL,
+    path VARCHAR(255) NOT NULL,
+    length INTEGER NOT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id)
+);
+
+-- 20211208143338_torrust_users.sql
+
+ALTER TABLE torrust_users
+ADD COLUMN administrator BOOLEAN NOT NULL DEFAULT FALSE;
+
+-- 20220308083424_torrust_torrents.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrents (
+    torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    uploader VARCHAR(32) NOT NULL,
+    info_hash VARCHAR(20) UNIQUE NOT NULL,
+    title VARCHAR(256) UNIQUE NOT NULL,
+    category_id INTEGER NOT NULL,
+    description TEXT,
+    upload_date INT(10) NOT NULL,
+    file_size BIGINT NOT NULL,
+    seeders INTEGER NOT NULL,
+    leechers INTEGER NOT NULL,
+    FOREIGN KEY(uploader) REFERENCES torrust_users(username) ON DELETE CASCADE,
+    FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE
+);
+
+-- 20220308170028_torrust_categories.sql
+
+ALTER TABLE torrust_categories
+ADD COLUMN icon VARCHAR(32);
+
diff --git a/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v2.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v2.sql
new file mode 100644
index 00000000..b31aea68
--- /dev/null
+++ b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v2.sql
@@ -0,0 +1,152 @@
+-- 20220721205537_torrust_users.sql
+
+CREATE TABLE IF NOT EXISTS torrust_users (
+    user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    date_registered TEXT NOT NULL,
+    administrator BOOL NOT NULL DEFAULT FALSE
+);
+
+-- 20220721210530_torrust_user_authentication.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_authentication (
+    user_id INTEGER NOT NULL PRIMARY KEY,
+    password_hash TEXT NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220727213942_torrust_user_profiles.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_profiles (
+    user_id INTEGER NOT NULL PRIMARY KEY,
+    username TEXT NOT NULL UNIQUE,
+    email TEXT UNIQUE,
+    email_verified BOOL NOT NULL DEFAULT FALSE,
+    bio TEXT,
+    avatar TEXT,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220727222313_torrust_tracker_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_tracker_keys (
+    tracker_key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER NOT NULL,
+    tracker_key TEXT NOT NULL,
+    date_expiry INTEGER NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220730102607_torrust_user_public_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_public_keys (
+    public_key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER NOT NULL,
+    public_key TEXT UNIQUE NOT NULL,
+    date_registered TEXT NOT NULL,
+    date_expiry TEXT NOT NULL,
+    revoked INTEGER NOT NULL DEFAULT 0,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220730104552_torrust_user_invitations.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_invitations (
+    invitation_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER NOT NULL,
+    public_key TEXT NOT NULL,
+    signed_digest TEXT NOT NULL,
+    date_begin TEXT NOT NULL,
+    date_expiry TEXT NOT NULL,
+    max_uses INTEGER NOT NULL,
+    personal_message TEXT,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE,
+    FOREIGN KEY(public_key) REFERENCES torrust_user_public_keys(public_key) ON DELETE CASCADE
+);
+
+-- 20220730105501_torrust_user_invitation_uses.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_invitation_uses (
+    invitation_use_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    invitation_id INTEGER NOT NULL,
+    registered_user_id INTEGER NOT NULL,
+    date_used TEXT NOT NULL,
+    FOREIGN KEY(invitation_id) REFERENCES torrust_user_invitations(invitation_id) ON DELETE CASCADE,
+    FOREIGN KEY(registered_user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220801201435_torrust_user_bans.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_bans (
+    ban_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER NOT NULL,
+    reason TEXT NOT NULL,
+    date_expiry TEXT NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220802161524_torrust_categories.sql
+
+CREATE TABLE torrust_categories (
+    category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    name TEXT NOT NULL UNIQUE
+);
+
+INSERT INTO torrust_categories (name) VALUES ('movies'), ('tv shows'), ('games'), ('music'), ('software');
+
+-- 20220810192613_torrust_torrents.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrents (
+    torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    uploader_id INTEGER NOT NULL,
+    category_id INTEGER NOT NULL,
+    info_hash TEXT UNIQUE NOT NULL,
+    size INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    pieces TEXT NOT NULL,
+    piece_length INTEGER NOT NULL,
+    private BOOLEAN NULL DEFAULT NULL,
+    root_hash INT NOT NULL DEFAULT 0,
+    date_uploaded TEXT NOT NULL,
+    FOREIGN KEY(uploader_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE,
+    FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE
+);
+
+-- 20220810201538_torrust_torrent_files.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_files (
+    file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    torrent_id INTEGER NOT NULL,
+    md5sum TEXT NULL DEFAULT NULL,
+    length BIGINT NOT NULL,
+    path TEXT DEFAULT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+-- 20220810201609_torrust_torrent_announce_urls.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_announce_urls (
+    announce_url_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    torrent_id INTEGER NOT NULL,
+    tracker_url TEXT NOT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+-- 20220812181520_torrust_torrent_info.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_info (
+    torrent_id INTEGER NOT NULL PRIMARY KEY,
+    title VARCHAR(256) UNIQUE NOT NULL,
+    description TEXT DEFAULT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+-- 20220812184806_torrust_torrent_tracker_stats.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_tracker_stats (
+    torrent_id INTEGER NOT NULL PRIMARY KEY,
+    tracker_url VARCHAR(256) NOT NULL,
+    seeders INTEGER NOT NULL DEFAULT 0,
+    leechers INTEGER NOT NULL DEFAULT 0,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE,
+    UNIQUE(torrent_id, tracker_url)
+);