From c3414da31c659590548cef11b118587f805232f7 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Wed, 26 Oct 2022 12:23:50 +0100
Subject: [PATCH 01/53] feat: add target dir to .gitignore

---
 .gitignore | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/.gitignore b/.gitignore
index 1952496d..a1c33ca6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 /.env
-/data.db*
 /config.toml
-/uploads/
+/data.db*
+/target
+/uploads/
\ No newline at end of file

From 5d6dec0fcba33960be9afc21873cea327d68dcaf Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Wed, 26 Oct 2022 12:26:05 +0100
Subject: [PATCH 02/53] refactor: allow adding more binaries

This change allows adding more binaries to the crate. We want to add a new
binary to execute DB upgrades that have to be executed manually.

---
 Cargo.toml            | 1 +
 src/{ => bin}/main.rs | 0
 2 files changed, 1 insertion(+)
 rename src/{ => bin}/main.rs (100%)

diff --git a/Cargo.toml b/Cargo.toml
index 4d43f3e7..d89251ac 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,6 +3,7 @@ name = "torrust-index-backend"
 version = "2.0.0-dev.1"
 authors = ["Mick van Dijke ", "Wesley Bijleveld "]
 edition = "2021"
+default-run = "main"
 
 [profile.dev.package.sqlx-macros]
 opt-level = 3
diff --git a/src/main.rs b/src/bin/main.rs
similarity index 100%
rename from src/main.rs
rename to src/bin/main.rs

From 7513df07d01b8aee6fb159440c059d6ec942fee3 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Wed, 26 Oct 2022 17:20:20 +0100
Subject: [PATCH 03/53] refactor: add scaffolding for database migration
 command

---
 .gitignore                                  |   1 +
 db_migrate/README.md                        | 133 +++++++++++++
 .../db_schemas/mysql/db_migrations_v2.sql   | 152 ++++++++++++++++++
 .../db_schemas/sqlite3/db_migrations_v1.sql |  68 ++++++++
 .../db_schemas/sqlite3/db_migrations_v2.sql | 152 ++++++++++++++++++
 db_migrate/docker/start_mysql.sh            |  10 ++
 db_migrate/docker/start_mysql_client.sh     |   3 +
 db_migrate/docker/stop_mysql.sh             |   3 +
 src/bin/db_migrate.rs                       |  55 +++++++
 src/databases/database.rs                   |  16 ++
 src/databases/sqlite.rs                     |  89 +++++++---
 11 files changed, 661 insertions(+), 21 deletions(-)
 create mode 100644 db_migrate/README.md
 create mode 100644 db_migrate/db_schemas/mysql/db_migrations_v2.sql
 create mode 100644 db_migrate/db_schemas/sqlite3/db_migrations_v1.sql
 create mode 100644 db_migrate/db_schemas/sqlite3/db_migrations_v2.sql
 create mode 100755 db_migrate/docker/start_mysql.sh
 create mode 100755 db_migrate/docker/start_mysql_client.sh
 create mode 100755 db_migrate/docker/stop_mysql.sh
 create mode 100644 src/bin/db_migrate.rs

diff --git a/.gitignore b/.gitignore
index a1c33ca6..42a0fc28 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 /.env
 /config.toml
 /data.db*
+/data_v2.db*
 /target
 /uploads/
\ No newline at end of file
diff --git a/db_migrate/README.md b/db_migrate/README.md
new file mode 100644
index 00000000..617a6009
--- /dev/null
+++ b/db_migrate/README.md
@@ -0,0 +1,133 @@
+# DB migration
+
+With the console command `cargo run --bin db_migrate` you can migrate data from `v1.0.0` to `v2.0.0`. This migration includes:
+
+- Changing the DB schema.
+- Transferring the torrent files in the dir `uploads` to the database.
+
+## SQLite3
+
+TODO
+
+## MySQL8
+
+Please note:
+
+> WARNING: MySQL migration is not implemented yet. We provide Docker infrastructure so that MySQL can be run while the migration tool is being implemented.
+
+and also:
+
+> WARNING: We are not using a persisted volume. If you remove the volume used by the container, you lose the database data.
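+
+If you do want the MySQL data to survive container restarts, you could mount a named volume at MySQL's data directory (`/var/lib/mysql`). This is only a sketch; the volume name `torrust-mysql-data` is an arbitrary example and is not used by the scripts provided in this patch:
+
+```s
+docker run \
+    --detach \
+    --name torrust-index-backend-mysql \
+    --env MYSQL_ROOT_PASSWORD=db-root-password \
+    --volume torrust-mysql-data:/var/lib/mysql \
+    -p 3306:3306 \
+    mysql:8.0.30
+```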
+
+Run the docker container and connect using the console client:
+
+```s
+./db_migrate/docker/start_mysql.sh
+./db_migrate/docker/start_mysql_client.sh
+```
+
+Once you are connected to the client, you can create the databases with:
+
+```s
+create database torrust_v1;
+create database torrust_v2;
+```
+
+After creating the databases you should see something like this:
+
+```s
+mysql> show databases;
++--------------------+
+| Database           |
++--------------------+
+| information_schema |
+| mysql              |
+| performance_schema |
+| sys                |
+| torrust_v1         |
+| torrust_v2         |
++--------------------+
+6 rows in set (0.001 sec)
+```
+
+How to connect from outside the container:
+
+```s
+mysql -h127.0.0.1 -uroot -pdb-root-password
+```
+
+### Create DB for backend `v2.0.0`
+
+You need to create a new empty database for `v2.0.0`.
+
+You need to change the configuration in the `config.toml` file to use MySQL:
+
+```toml
+[database]
+connect_url = "mysql://root:db-root-password@127.0.0.1/torrust_v2"
+```
+
+After running the backend with `cargo run` you should see the tables created by the migrations:
+
+```s
+mysql> show tables;
++-------------------------------+
+| Tables_in_torrust_v2          |
++-------------------------------+
+| _sqlx_migrations              |
+| torrust_categories            |
+| torrust_torrent_announce_urls |
+| torrust_torrent_files         |
+| torrust_torrent_info          |
+| torrust_torrent_tracker_stats |
+| torrust_torrents              |
+| torrust_tracker_keys          |
+| torrust_user_authentication   |
+| torrust_user_bans             |
+| torrust_user_invitation_uses  |
+| torrust_user_invitations      |
+| torrust_user_profiles         |
+| torrust_user_public_keys      |
+| torrust_users                 |
++-------------------------------+
+15 rows in set (0.001 sec)
+```
+
+### Create DB for backend `v1.0.0`
+
+The `db_migrate` command is going to import data from version `v1.0.0` (database and `uploads` folder) into the new empty database for `v2.0.0`.
+
+You can import data into the source database for testing with the `mysql` DB client or docker.
+
+Using `mysql` client:
+
+```s
+mysql -h127.0.0.1 -uroot -pdb-root-password torrust_v1 < ./db_migrate/db_schemas/db_migrations_v1_for_mysql_8.sql
+```
+
+Using dockerized `mysql` client:
+
+```s
+docker exec -i torrust-index-backend-mysql mysql torrust_v1 -uroot -pdb-root-password < ./db_migrate/db_schemas/db_migrations_v1_for_mysql_8.sql
+```
+
+### Commands
+
+Connect to `mysql` client:
+
+```s
+mysql -h127.0.0.1 -uroot -pdb-root-password torrust_v1
+```
+
+Connect to dockerized `mysql` client:
+
+```s
+docker exec -it torrust-index-backend-mysql mysql torrust_v1 -uroot -pdb-root-password
+```
+
+Backup DB:
+
+```s
+mysqldump -h127.0.0.1 torrust_v1 -uroot -pdb-root-password > ./db_migrate/db_schemas/v1_schema_dump.sql
+mysqldump -h127.0.0.1 torrust_v2 -uroot -pdb-root-password > ./db_migrate/db_schemas/v2_schema_dump.sql
+```
diff --git a/db_migrate/db_schemas/mysql/db_migrations_v2.sql b/db_migrate/db_schemas/mysql/db_migrations_v2.sql
new file mode 100644
index 00000000..08349bb5
--- /dev/null
+++ b/db_migrate/db_schemas/mysql/db_migrations_v2.sql
@@ -0,0 +1,152 @@
+# 20220721205537_torrust_users.sql
+
+CREATE TABLE IF NOT EXISTS torrust_users (
+    user_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    date_registered DATETIME NOT NULL,
+    administrator BOOLEAN NOT NULL DEFAULT FALSE
+);
+
+# 20220721210530_torrust_user_authentication.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_authentication (
+    user_id INTEGER NOT NULL PRIMARY KEY,
+    password_hash TEXT NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220727213942_torrust_user_profiles.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_profiles (
+    user_id INTEGER NOT NULL PRIMARY KEY,
+    username VARCHAR(24) NOT NULL UNIQUE,
+    email VARCHAR(320) UNIQUE,
+    email_verified BOOL NOT NULL DEFAULT FALSE,
+    bio TEXT,
+    avatar TEXT,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220727222313_torrust_tracker_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_tracker_keys (
+    tracker_key_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    user_id INTEGER NOT NULL,
+    tracker_key CHAR(32) NOT NULL,
+    date_expiry BIGINT NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220730102607_torrust_user_public_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_public_keys (
+    public_key_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    user_id INTEGER NOT NULL,
+    public_key CHAR(32) UNIQUE NOT NULL,
+    date_registered DATETIME NOT NULL,
+    date_expiry DATETIME NOT NULL,
+    revoked BOOLEAN NOT NULL DEFAULT FALSE,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220730104552_torrust_user_invitations.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_invitations (
+    invitation_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    user_id INTEGER NOT NULL,
+    public_key CHAR(32) NOT NULL,
+    signed_digest CHAR(32) NOT NULL,
+    date_begin DATETIME NOT NULL,
+    date_expiry DATETIME NOT NULL,
+    max_uses INTEGER NOT NULL,
+    personal_message VARCHAR(512),
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE,
+    FOREIGN KEY(public_key) REFERENCES torrust_user_public_keys(public_key) ON DELETE CASCADE
+);
+
+# 20220730105501_torrust_user_invitation_uses.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_invitation_uses (
+    invitation_use_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    invitation_id INTEGER NOT NULL,
+    registered_user_id INTEGER NOT NULL,
+    date_used DATETIME NOT NULL,
+    FOREIGN KEY(invitation_id) REFERENCES torrust_user_invitations(invitation_id) ON DELETE CASCADE,
+    FOREIGN KEY(registered_user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220801201435_torrust_user_bans.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_bans (
+    ban_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    user_id INTEGER NOT NULL,
+    reason TEXT NOT NULL,
+    date_expiry DATETIME NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+# 20220802161524_torrust_categories.sql
+
+CREATE TABLE torrust_categories (
+    category_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    name VARCHAR(64) NOT NULL UNIQUE
+);
+
+INSERT INTO torrust_categories (name) VALUES ('movies'), ('tv shows'), ('games'), ('music'), ('software');
+
+# 20220810192613_torrust_torrents.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrents (
+    torrent_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    uploader_id INTEGER NOT NULL,
+    category_id INTEGER NOT NULL,
+    info_hash CHAR(40) UNIQUE NOT NULL,
+    size BIGINT NOT NULL,
+    name TEXT NOT NULL,
+    pieces LONGTEXT NOT NULL,
+    piece_length BIGINT NOT NULL,
+    private BOOLEAN NULL DEFAULT NULL,
+    root_hash BOOLEAN NOT NULL DEFAULT FALSE,
+    date_uploaded DATETIME NOT NULL,
+    FOREIGN KEY(uploader_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE,
+    FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE
+);
+
+# 20220810201538_torrust_torrent_files.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_files (
+    file_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    torrent_id INTEGER NOT NULL,
+    md5sum TEXT NULL DEFAULT NULL,
+    length BIGINT NOT NULL,
+    path TEXT DEFAULT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+# 20220810201609_torrust_torrent_announce_urls.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_announce_urls (
+    announce_url_id INTEGER NOT NULL PRIMARY KEY AUTO_INCREMENT,
+    torrent_id INTEGER NOT NULL,
+    tracker_url VARCHAR(256) NOT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+# 20220812181520_torrust_torrent_info.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_info (
+    torrent_id INTEGER NOT NULL PRIMARY KEY,
+    title VARCHAR(256) UNIQUE NOT NULL,
+    description TEXT DEFAULT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+# 20220812184806_torrust_torrent_tracker_stats.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_tracker_stats (
+    torrent_id INTEGER NOT NULL PRIMARY KEY,
+    tracker_url VARCHAR(256) NOT NULL,
+    seeders INTEGER NOT NULL DEFAULT 0,
+    leechers INTEGER NOT NULL DEFAULT 0,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE,
+    UNIQUE(torrent_id, tracker_url)
+);
diff --git a/db_migrate/db_schemas/sqlite3/db_migrations_v1.sql b/db_migrate/db_schemas/sqlite3/db_migrations_v1.sql
new file mode 100644
index 00000000..214c4921
--- /dev/null
+++ b/db_migrate/db_schemas/sqlite3/db_migrations_v1.sql
@@ -0,0 +1,68 @@
+-- 20210831113004_torrust_users.sql
+
+CREATE TABLE IF NOT EXISTS torrust_users (
+    user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    username VARCHAR(32) NOT NULL UNIQUE,
+    email VARCHAR(100) NOT NULL UNIQUE,
+    email_verified BOOLEAN NOT NULL DEFAULT FALSE,
+    password TEXT NOT NULL
+);
+
+-- 20210904135524_torrust_tracker_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_tracker_keys (
+    key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER,
+    key VARCHAR(32) NOT NULL,
+    valid_until INT(10) NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id)
+);
+
+-- 20210905160623_torrust_categories.sql
+
+CREATE TABLE torrust_categories (
+    category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    name VARCHAR(64) NOT NULL UNIQUE
+);
+
+INSERT INTO torrust_categories (name) VALUES
+('movies'), ('tv shows'), ('games'), ('music'), ('software');
+
+-- 20210907083424_torrust_torrent_files.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_files (
+    file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    torrent_id INTEGER NOT NULL,
+    number INTEGER NOT NULL,
+    path VARCHAR(255) NOT NULL,
+    length INTEGER NOT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id)
+);
+
+-- 20211208143338_torrust_users.sql
+
+ALTER TABLE torrust_users
+ADD COLUMN administrator BOOLEAN NOT NULL DEFAULT FALSE;
+
+-- 20220308083424_torrust_torrents.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrents (
+    torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    uploader VARCHAR(32) NOT NULL,
+    info_hash VARCHAR(20) UNIQUE NOT NULL,
+    title VARCHAR(256) UNIQUE NOT NULL,
+    category_id INTEGER NOT NULL,
+    description TEXT,
+    upload_date INT(10) NOT NULL,
+    file_size BIGINT NOT NULL,
+    seeders INTEGER NOT NULL,
+    leechers INTEGER NOT NULL,
+    FOREIGN KEY(uploader) REFERENCES torrust_users(username) ON DELETE CASCADE,
+    FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE
+);
+
+-- 20220308170028_torrust_categories.sql
+
+ALTER TABLE torrust_categories
+ADD COLUMN icon VARCHAR(32);
+
diff --git a/db_migrate/db_schemas/sqlite3/db_migrations_v2.sql b/db_migrate/db_schemas/sqlite3/db_migrations_v2.sql
new file mode 100644
index 00000000..b31aea68
--- /dev/null
+++ b/db_migrate/db_schemas/sqlite3/db_migrations_v2.sql
@@ -0,0 +1,152 @@
+-- 20220721205537_torrust_users.sql
+
+CREATE TABLE IF NOT EXISTS torrust_users (
+    user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    date_registered TEXT NOT NULL,
+    administrator BOOL NOT NULL DEFAULT FALSE
+);
+
+-- 20220721210530_torrust_user_authentication.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_authentication (
+    user_id INTEGER NOT NULL PRIMARY KEY,
+    password_hash TEXT NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220727213942_torrust_user_profiles.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_profiles (
+    user_id INTEGER NOT NULL PRIMARY KEY,
+    username TEXT NOT NULL UNIQUE,
+    email TEXT UNIQUE,
+    email_verified BOOL NOT NULL DEFAULT FALSE,
+    bio TEXT,
+    avatar TEXT,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220727222313_torrust_tracker_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_tracker_keys (
+    tracker_key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER NOT NULL,
+    tracker_key TEXT NOT NULL,
+    date_expiry INTEGER NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220730102607_torrust_user_public_keys.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_public_keys (
+    public_key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER NOT NULL,
+    public_key TEXT UNIQUE NOT NULL,
+    date_registered TEXT NOT NULL,
+    date_expiry TEXT NOT NULL,
+    revoked INTEGER NOT NULL DEFAULT 0,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220730104552_torrust_user_invitations.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_invitations (
+    invitation_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER NOT NULL,
+    public_key TEXT NOT NULL,
+    signed_digest TEXT NOT NULL,
+    date_begin TEXT NOT NULL,
+    date_expiry TEXT NOT NULL,
+    max_uses INTEGER NOT NULL,
+    personal_message TEXT,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE,
+    FOREIGN KEY(public_key) REFERENCES torrust_user_public_keys(public_key) ON DELETE CASCADE
+);
+
+-- 20220730105501_torrust_user_invitation_uses.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_invitation_uses (
+    invitation_use_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    invitation_id INTEGER NOT NULL,
+    registered_user_id INTEGER NOT NULL,
+    date_used TEXT NOT NULL,
+    FOREIGN KEY(invitation_id) REFERENCES torrust_user_invitations(invitation_id) ON DELETE CASCADE,
+    FOREIGN KEY(registered_user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220801201435_torrust_user_bans.sql
+
+CREATE TABLE IF NOT EXISTS torrust_user_bans (
+    ban_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    user_id INTEGER NOT NULL,
+    reason TEXT NOT NULL,
+    date_expiry TEXT NOT NULL,
+    FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE
+);
+
+-- 20220802161524_torrust_categories.sql
+
+CREATE TABLE torrust_categories (
+    category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    name TEXT NOT NULL UNIQUE
+);
+
+INSERT INTO torrust_categories (name) VALUES ('movies'), ('tv shows'), ('games'), ('music'), ('software');
+
+-- 20220810192613_torrust_torrents.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrents (
+    torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    uploader_id INTEGER NOT NULL,
+    category_id INTEGER NOT NULL,
+    info_hash TEXT UNIQUE NOT NULL,
+    size INTEGER NOT NULL,
+    name TEXT NOT NULL,
+    pieces TEXT NOT NULL,
+    piece_length INTEGER NOT NULL,
+    private BOOLEAN NULL DEFAULT NULL,
+    root_hash INT NOT NULL DEFAULT 0,
+    date_uploaded TEXT NOT NULL,
+    FOREIGN KEY(uploader_id) REFERENCES torrust_users(user_id) ON DELETE CASCADE,
+    FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE
+);
+
+-- 20220810201538_torrust_torrent_files.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_files (
+    file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    torrent_id INTEGER NOT NULL,
+    md5sum TEXT NULL DEFAULT NULL,
+    length BIGINT NOT NULL,
+    path TEXT DEFAULT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+-- 20220810201609_torrust_torrent_announce_urls.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_announce_urls (
+    announce_url_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    torrent_id INTEGER NOT NULL,
+    tracker_url TEXT NOT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+-- 20220812181520_torrust_torrent_info.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_info (
+    torrent_id INTEGER NOT NULL PRIMARY KEY,
+    title VARCHAR(256) UNIQUE NOT NULL,
+    description TEXT DEFAULT NULL,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE
+);
+
+-- 20220812184806_torrust_torrent_tracker_stats.sql
+
+CREATE TABLE IF NOT EXISTS torrust_torrent_tracker_stats (
+    torrent_id INTEGER NOT NULL PRIMARY KEY,
+    tracker_url VARCHAR(256) NOT NULL,
+    seeders INTEGER NOT NULL DEFAULT 0,
+    leechers INTEGER NOT NULL DEFAULT 0,
+    FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) ON DELETE CASCADE,
+    UNIQUE(torrent_id, tracker_url)
+);
diff --git a/db_migrate/docker/start_mysql.sh b/db_migrate/docker/start_mysql.sh
new file mode 100755
index 00000000..5a245d32
--- /dev/null
+++ b/db_migrate/docker/start_mysql.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+docker run \
+    --detach \
+    --name torrust-index-backend-mysql \
+    --env MYSQL_USER=db-user \
+    --env MYSQL_PASSWORD=db-password \
+    --env MYSQL_ROOT_PASSWORD=db-root-password \
+    -p 3306:3306 \
+    mysql:8.0.30 # This version is used in tests
diff --git a/db_migrate/docker/start_mysql_client.sh b/db_migrate/docker/start_mysql_client.sh
new file mode 100755
index 00000000..fed2a877
--- /dev/null
+++ b/db_migrate/docker/start_mysql_client.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker exec -it torrust-index-backend-mysql mysql -uroot -pdb-root-password
diff --git a/db_migrate/docker/stop_mysql.sh b/db_migrate/docker/stop_mysql.sh
new file mode 100755
index 00000000..19d7a786
--- /dev/null
+++ b/db_migrate/docker/stop_mysql.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker stop torrust-index-backend-mysql
diff --git a/src/bin/db_migrate.rs b/src/bin/db_migrate.rs
new file mode 100644
index 00000000..693ed5e8
--- /dev/null
+++ b/src/bin/db_migrate.rs
@@ -0,0 +1,55 @@
+//! Migration command to migrate data from v1.0.0 to v2.0.0
+//! Run it with `cargo run --bin db_migrate`
+
+use std::sync::Arc;
+use torrust_index_backend::config::Configuration;
+use torrust_index_backend::databases::database::{
+    connect_database, connect_database_without_running_migrations,
+};
+
+#[actix_web::main]
+async fn main() {
+    let dest_database_connect_url = "sqlite://data_v2.db?mode=rwc";
+
+    let cfg = match Configuration::load_from_file().await {
+        Ok(config) => Arc::new(config),
+        Err(error) => {
+            panic!("{}", error)
+        }
+    };
+
+    let settings = cfg.settings.read().await;
+
+    // Connect to the current v1.0.0 DB
+    let source_database = Arc::new(
+        connect_database_without_running_migrations(&settings.database.connect_url)
+            .await
+            .expect("Can't connect to source DB."),
+    );
+
+    // Connect to the new v2.0.0 DB (running migrations)
+    let dest_database = Arc::new(
+        connect_database(&dest_database_connect_url)
+            .await
+            .expect("Can't connect to dest DB."),
+    );
+
+    println!("Upgrading database from v1.0.0 to v2.0.0 ...");
+
+    // It's just a test for the source connection.
+    // Print categories in current DB
+    let categories = source_database.get_categories().await;
+    println!("[v1] categories: {:?}", &categories);
+
+    // It's just a test for the dest connection.
+    // Print categories in new DB
+    let categories = dest_database.get_categories().await;
+    println!("[v2] categories: {:?}", &categories);
+
+    // Transfer categories
+
+    /* TODO:
+       - Transfer categories: remove categories from seeding, reset sequence for IDs, copy categories in the right order to keep the same ids.
+       - ...
+    */
+}
diff --git a/src/databases/database.rs b/src/databases/database.rs
index 0f06f702..27adde76 100644
--- a/src/databases/database.rs
+++ b/src/databases/database.rs
@@ -77,6 +77,22 @@ pub async fn connect_database(db_path: &str) -> Result<Box<dyn Database>, DatabaseError> {
     }
 }
 
+/// Connect to a database without running migrations
+pub async fn connect_database_without_running_migrations(db_path: &str) -> Result<Box<dyn Database>, DatabaseError> {
+    match &db_path.chars().collect::<Vec<char>>() as &[char] {
+        ['s', 'q', 'l', 'i', 't', 'e', ..] => {
+            let db = SqliteDatabase::new_without_running_migrations(db_path).await;
+            Ok(Box::new(db))
+        }
+        ['m', 'y', 's', 'q', 'l', ..] => {
+            todo!()
+        }
+        _ => {
+            Err(DatabaseError::UnrecognizedDatabaseDriver)
+        }
+    }
+}
+
 /// Trait for database implementations.
#[async_trait] pub trait Database: Sync + Send { diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 835979fe..88a904ab 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -30,6 +30,14 @@ impl SqliteDatabase { Self { pool: db } } + + pub async fn new_without_running_migrations(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } } #[async_trait] @@ -54,12 +62,13 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::Error)?; // add password hash for account - let insert_user_auth_result = query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") - .bind(user_id) - .bind(password_hash) - .execute(&mut tx) - .await - .map_err(|_| DatabaseError::Error); + let insert_user_auth_result = + query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") + .bind(user_id) + .bind(password_hash) + .execute(&mut tx) + .await + .map_err(|_| DatabaseError::Error); // rollback transaction on error if let Err(e) = insert_user_auth_result { @@ -108,15 +117,23 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::UserNotFound) } - async fn get_user_authentication_from_id(&self, user_id: i64) -> Result { - query_as::<_, UserAuthentication>("SELECT * FROM torrust_user_authentication WHERE user_id = ?") - .bind(user_id) - .fetch_one(&self.pool) - .await - .map_err(|_| DatabaseError::UserNotFound) + async fn get_user_authentication_from_id( + &self, + user_id: i64, + ) -> Result { + query_as::<_, UserAuthentication>( + "SELECT * FROM torrust_user_authentication WHERE user_id = ?", + ) + .bind(user_id) + .fetch_one(&self.pool) + .await + .map_err(|_| DatabaseError::UserNotFound) } - async fn get_user_profile_from_username(&self, username: &str) -> Result { + async fn get_user_profile_from_username( + &self, + username: &str, + ) -> Result { query_as::<_, UserProfile>("SELECT * FROM torrust_user_profiles WHERE username = ?") .bind(username) .fetch_one(&self.pool) @@ -155,7 +172,12 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::Error) } - async fn ban_user(&self, user_id: i64, reason: &str, date_expiry: NaiveDateTime) -> Result<(), DatabaseError> { + async fn ban_user( + &self, + user_id: i64, + reason: &str, + date_expiry: NaiveDateTime, + ) -> Result<(), DatabaseError> { // date needs to be in ISO 8601 format let date_expiry_string = date_expiry.format("%Y-%m-%d %H:%M:%S").to_string(); @@ -193,7 +215,11 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::Error) } - async fn add_tracker_key(&self, user_id: i64, tracker_key: &TrackerKey) -> Result<(), DatabaseError> { + async fn add_tracker_key( + &self, + user_id: i64, + tracker_key: &TrackerKey, + ) -> Result<(), DatabaseError> { let key = tracker_key.key.clone(); query("INSERT INTO torrust_tracker_keys (user_id, tracker_key, date_expiry) VALUES ($1, $2, $3)") @@ -343,7 +369,10 @@ impl Database for SqliteDatabase { category_filter_query ); - let count_query = format!("SELECT COUNT(*) as count FROM ({}) AS count_table", query_string); + let count_query = format!( + "SELECT COUNT(*) as count FROM ({}) AS count_table", + query_string + ); let count_result: Result = query_as(&count_query) .bind(title.clone()) @@ -390,7 +419,11 @@ impl Database for SqliteDatabase { let (pieces, root_hash): (String, bool) = if let Some(pieces) = &torrent.info.pieces { (bytes_to_hex(pieces.as_ref()), false) } else { - let 
root_hash = torrent.info.root_hash.as_ref().ok_or(DatabaseError::Error)?; + let root_hash = torrent + .info + .root_hash + .as_ref() + .ok_or(DatabaseError::Error)?; (root_hash.to_string(), true) }; @@ -537,7 +570,10 @@ impl Database for SqliteDatabase { )) } - async fn get_torrent_info_from_id(&self, torrent_id: i64) -> Result { + async fn get_torrent_info_from_id( + &self, + torrent_id: i64, + ) -> Result { query_as::<_, DbTorrentInfo>( "SELECT name, pieces, piece_length, private, root_hash FROM torrust_torrents WHERE torrent_id = ?", ) @@ -576,7 +612,10 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::TorrentNotFound) } - async fn get_torrent_listing_from_id(&self, torrent_id: i64) -> Result { + async fn get_torrent_listing_from_id( + &self, + torrent_id: i64, + ) -> Result { query_as::<_, TorrentListing>( "SELECT tt.torrent_id, tp.username AS uploader, tt.info_hash, ti.title, ti.description, tt.category_id, tt.date_uploaded, tt.size AS file_size, CAST(COALESCE(sum(ts.seeders),0) as signed) as seeders, @@ -601,7 +640,11 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::Error) } - async fn update_torrent_title(&self, torrent_id: i64, title: &str) -> Result<(), DatabaseError> { + async fn update_torrent_title( + &self, + torrent_id: i64, + title: &str, + ) -> Result<(), DatabaseError> { query("UPDATE torrust_torrent_info SET title = $1 WHERE torrent_id = $2") .bind(title) .bind(torrent_id) @@ -626,7 +669,11 @@ impl Database for SqliteDatabase { }) } - async fn update_torrent_description(&self, torrent_id: i64, description: &str) -> Result<(), DatabaseError> { + async fn update_torrent_description( + &self, + torrent_id: i64, + description: &str, + ) -> Result<(), DatabaseError> { query("UPDATE torrust_torrent_info SET description = $1 WHERE torrent_id = $2") .bind(description) .bind(torrent_id) From b92fb0834cea34e783494081373c19167ceb5dd0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 27 Oct 2022 19:04:35 +0100 Subject: [PATCH 04/53] feat: [#56] transfer categories from db v1.0.0 to v2.0.0 First action for the command to upgrade data. It transfers the categories from the current DB schema (v1.0.0) to the new DB schema. 
--- src/bin/db_migrate.rs | 91 +++++++++++++++++---------- src/databases/database.rs | 16 ----- src/databases/mod.rs | 2 + src/databases/sqlite.rs | 8 --- src/databases/sqlite_v1_0_0.rs | 30 +++++++++ src/databases/sqlite_v2_0_0.rs | 109 +++++++++++++++++++++++++++++++++ 6 files changed, 200 insertions(+), 56 deletions(-) create mode 100644 src/databases/sqlite_v1_0_0.rs create mode 100644 src/databases/sqlite_v2_0_0.rs diff --git a/src/bin/db_migrate.rs b/src/bin/db_migrate.rs index 693ed5e8..fcfb7eae 100644 --- a/src/bin/db_migrate.rs +++ b/src/bin/db_migrate.rs @@ -3,14 +3,11 @@ use std::sync::Arc; use torrust_index_backend::config::Configuration; -use torrust_index_backend::databases::database::{ - connect_database, connect_database_without_running_migrations, -}; - -#[actix_web::main] -async fn main() { - let dest_database_connect_url = "sqlite://data_v2.db?mode=rwc"; +use torrust_index_backend::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use torrust_index_backend::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +async fn current_db() -> Arc { + // Connect to the old v1.0.0 DB let cfg = match Configuration::load_from_file().await { Ok(config) => Arc::new(config), Err(error) => { @@ -20,36 +17,66 @@ async fn main() { let settings = cfg.settings.read().await; - // Connect to the current v1.0.0 DB - let source_database = Arc::new( - connect_database_without_running_migrations(&settings.database.connect_url) - .await - .expect("Can't connect to source DB."), - ); + Arc::new(SqliteDatabaseV1_0_0::new(&settings.database.connect_url).await) +} + +async fn new_db(db_filename: String) -> Arc { + let dest_database_connect_url = format!("sqlite://{}?mode=rwc", db_filename); + Arc::new(SqliteDatabaseV2_0_0::new(&dest_database_connect_url).await) +} + +async fn reset_destiny_database(dest_database: Arc) { + println!("Truncating all tables in destiny database ..."); + dest_database + .delete_all_database_rows() + .await + .expect("Can't reset destiny database."); +} - // Connect to the new v2.0.0 DB (running migrations) - let dest_database = Arc::new( - connect_database(&dest_database_connect_url) +async fn transfer_categories( + source_database: Arc, + dest_database: Arc, +) { + let source_categories = source_database.get_categories_order_by_id().await.unwrap(); + println!("[v1] categories: {:?}", &source_categories); + + let result = dest_database.reset_categories_sequence().await.unwrap(); + println!("result {:?}", result); + + for cat in &source_categories { + println!( + "[v2] adding category: {:?} {:?} ...", + &cat.category_id, &cat.name + ); + let id = dest_database + .insert_category_and_get_id(&cat.name) .await - .expect("Can't connect to dest DB."), - ); + .unwrap(); + + if id != cat.category_id { + panic!( + "Error copying category {:?} from source DB to destiny DB", + &cat.category_id + ); + } - println!("Upgrading database from v1.0.0 to v2.0.0 ..."); + println!("[v2] category: {:?} {:?} added.", id, &cat.name); + } - // It's just a test for the source connection. - // Print categories in current DB - let categories = source_database.get_categories().await; - println!("[v1] categories: {:?}", &categories); + let dest_categories = dest_database.get_categories().await.unwrap(); + println!("[v2] categories: {:?}", &dest_categories); +} + +#[actix_web::main] +async fn main() { + // Get connections to source adn destiny databases + let source_database = current_db().await; + let dest_database = new_db("data_v2.db".to_string()).await; - // It's just a test for the dest connection. 
- // Print categories in new DB - let categories = dest_database.get_categories().await; - println!("[v2] categories: {:?}", &categories); + println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); - // Transfer categories + reset_destiny_database(dest_database.clone()).await; + transfer_categories(source_database.clone(), dest_database.clone()).await; - /* TODO: - - Transfer categories: remove categories from seeding, reset sequence for IDs, copy categories in the right order to keep the same ids. - - ... - */ + // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. } diff --git a/src/databases/database.rs b/src/databases/database.rs index 27adde76..0f06f702 100644 --- a/src/databases/database.rs +++ b/src/databases/database.rs @@ -77,22 +77,6 @@ pub async fn connect_database(db_path: &str) -> Result, Databa } } -/// Connect to a database without running migrations -pub async fn connect_database_without_running_migrations(db_path: &str) -> Result, DatabaseError> { - match &db_path.chars().collect::>() as &[char] { - ['s', 'q', 'l', 'i', 't', 'e', ..] => { - let db = SqliteDatabase::new_without_running_migrations(db_path).await; - Ok(Box::new(db)) - } - ['m', 'y', 's', 'q', 'l', ..] => { - todo!() - } - _ => { - Err(DatabaseError::UnrecognizedDatabaseDriver) - } - } -} - /// Trait for database implementations. #[async_trait] pub trait Database: Sync + Send { diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 169d99f4..c15a2b72 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -1,3 +1,5 @@ pub mod database; pub mod mysql; pub mod sqlite; +pub mod sqlite_v1_0_0; +pub mod sqlite_v2_0_0; diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 88a904ab..62b197d1 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -30,14 +30,6 @@ impl SqliteDatabase { Self { pool: db } } - - pub async fn new_without_running_migrations(database_url: &str) -> Self { - let db = SqlitePoolOptions::new() - .connect(database_url) - .await - .expect("Unable to create database pool."); - Self { pool: db } - } } #[async_trait] diff --git a/src/databases/sqlite_v1_0_0.rs b/src/databases/sqlite_v1_0_0.rs new file mode 100644 index 00000000..10420128 --- /dev/null +++ b/src/databases/sqlite_v1_0_0.rs @@ -0,0 +1,30 @@ +use super::database::DatabaseError; +use serde::{Deserialize, Serialize}; +use sqlx::sqlite::SqlitePoolOptions; +use sqlx::{query_as, SqlitePool}; + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct Category { + pub category_id: i64, + pub name: String, +} +pub struct SqliteDatabaseV1_0_0 { + pub pool: SqlitePool, +} + +impl SqliteDatabaseV1_0_0 { + pub async fn new(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } + + pub async fn get_categories_order_by_id(&self) -> Result, DatabaseError> { + query_as::<_, Category>("SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC") + .fetch_all(&self.pool) + .await + .map_err(|_| DatabaseError::Error) + } +} diff --git a/src/databases/sqlite_v2_0_0.rs b/src/databases/sqlite_v2_0_0.rs new file mode 100644 index 00000000..0a1efe33 --- /dev/null +++ b/src/databases/sqlite_v2_0_0.rs @@ -0,0 +1,109 @@ +use super::database::DatabaseError; +use serde::{Deserialize, Serialize}; +use sqlx::sqlite::{SqlitePoolOptions, SqliteQueryResult}; +use sqlx::{query, query_as, SqlitePool}; + +#[derive(Debug, Serialize, 
Deserialize, sqlx::FromRow)]
+pub struct Category {
+    pub category_id: i64,
+    pub name: String,
+}
+pub struct SqliteDatabaseV2_0_0 {
+    pub pool: SqlitePool,
+}
+
+impl SqliteDatabaseV2_0_0 {
+    pub async fn new(database_url: &str) -> Self {
+        let db = SqlitePoolOptions::new()
+            .connect(database_url)
+            .await
+            .expect("Unable to create database pool.");
+        Self { pool: db }
+    }
+
+    pub async fn reset_categories_sequence(&self) -> Result<SqliteQueryResult, DatabaseError> {
+        query("DELETE FROM `sqlite_sequence` WHERE `name` = 'torrust_categories'")
+            .execute(&self.pool)
+            .await
+            .map_err(|_| DatabaseError::Error)
+    }
+
+    pub async fn get_categories(&self) -> Result<Vec<Category>, DatabaseError> {
+        query_as::<_, Category>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name")
+            .fetch_all(&self.pool)
+            .await
+            .map_err(|_| DatabaseError::Error)
+    }
+
+    pub async fn insert_category_and_get_id(&self, category_name: &str) -> Result<i64, DatabaseError> {
+        query("INSERT INTO torrust_categories (name) VALUES (?)")
+            .bind(category_name)
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+            .map_err(|e| match e {
+                sqlx::Error::Database(err) => {
+                    if err.message().contains("UNIQUE") {
+                        DatabaseError::CategoryAlreadyExists
+                    } else {
+                        DatabaseError::Error
+                    }
+                }
+                _ => DatabaseError::Error,
+            })
+    }
+
+    pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> {
+        query("DELETE FROM torrust_categories;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_torrents;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_tracker_keys;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_users;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_user_authentication;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_user_bans;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_user_invitations;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_user_profiles;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_torrents;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        query("DELETE FROM torrust_user_public_keys;")
+            .execute(&self.pool)
+            .await
+            .unwrap();
+
+        Ok(())
+    }
+}

From 996c7d107558352cb377f2111b2ff5caa96cf6f1 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Mon, 31 Oct 2022 13:38:40 +0000
Subject: [PATCH 05/53] refactor: [#56] rename command and dirs

Make the name more generic to allow adding other upgrade commands in the
future.
---
 src/bin/{db_migrate.rs => upgrade.rs}            |  2 +-
 src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs   |  0
 .../from_v1_0_0_to_v2_0_0}/README.md             | 16 ++++++++--------
 .../db_schemas/mysql/db_migrations_v2.sql        |  0
 .../db_schemas/sqlite3/db_migrations_v1.sql      |  0
 .../db_schemas/sqlite3/db_migrations_v2.sql      |  0
 .../from_v1_0_0_to_v2_0_0}/docker/start_mysql.sh |  0
 .../docker/start_mysql_client.sh                 |  0
 .../from_v1_0_0_to_v2_0_0}/docker/stop_mysql.sh  |  0
 9 files changed, 9 insertions(+), 9 deletions(-)
 rename src/bin/{db_migrate.rs => upgrade.rs} (98%)
 create mode 100644 src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
 rename {db_migrate => upgrades/from_v1_0_0_to_v2_0_0}/README.md (75%)
 rename {db_migrate => upgrades/from_v1_0_0_to_v2_0_0}/db_schemas/mysql/db_migrations_v2.sql (100%)
 rename {db_migrate => upgrades/from_v1_0_0_to_v2_0_0}/db_schemas/sqlite3/db_migrations_v1.sql (100%)
 rename {db_migrate => upgrades/from_v1_0_0_to_v2_0_0}/db_schemas/sqlite3/db_migrations_v2.sql (100%)
 rename {db_migrate => upgrades/from_v1_0_0_to_v2_0_0}/docker/start_mysql.sh (100%)
 rename {db_migrate => upgrades/from_v1_0_0_to_v2_0_0}/docker/start_mysql_client.sh (100%)
 rename {db_migrate => upgrades/from_v1_0_0_to_v2_0_0}/docker/stop_mysql.sh (100%)

diff --git a/src/bin/db_migrate.rs b/src/bin/upgrade.rs
similarity index 98%
rename from src/bin/db_migrate.rs
rename to src/bin/upgrade.rs
index fcfb7eae..563bebdb 100644
--- a/src/bin/db_migrate.rs
+++ b/src/bin/upgrade.rs
@@ -1,5 +1,5 @@
 //! Migration command to migrate data from v1.0.0 to v2.0.0
-//! Run it with `cargo run --bin db_migrate`
+//! Run it with `cargo run --bin upgrade`
 
 use std::sync::Arc;
 use torrust_index_backend::config::Configuration;
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
new file mode 100644
index 00000000..e69de29b
diff --git a/db_migrate/README.md b/upgrades/from_v1_0_0_to_v2_0_0/README.md
similarity index 75%
rename from db_migrate/README.md
rename to upgrades/from_v1_0_0_to_v2_0_0/README.md
index 617a6009..af9a9b69 100644
--- a/db_migrate/README.md
+++ b/upgrades/from_v1_0_0_to_v2_0_0/README.md
@@ -1,6 +1,6 @@
 # DB migration
 
-With the console command `cargo run --bin db_migrate` you can migrate data from `v1.0.0` to `v2.0.0`. This migration includes:
+With the console command `cargo run --bin upgrade` you can migrate data from `v1.0.0` to `v2.0.0`. This migration includes:
 
 - Changing the DB schema.
 - Transferring the torrent files in the dir `uploads` to the database.
@@ -22,8 +22,8 @@ and also:
 Run the docker container and connect using the console client:
 
 ```s
-./db_migrate/docker/start_mysql.sh
-./db_migrate/docker/start_mysql_client.sh
+./upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql.sh
+./upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql_client.sh
 ```
@@ -95,20 +95,20 @@ ### Create DB for backend `v1.0.0`
 
-The `db_migrate` command is going to import data from version `v1.0.0` (database and `uploads` folder) into the new empty database for `v2.0.0`.
+The `upgrade` command is going to import data from version `v1.0.0` (database and `uploads` folder) into the new empty database for `v2.0.0`.
 
 You can import data into the source database for testing with the `mysql` DB client or docker.
 
 Using `mysql` client:
 
 ```s
-mysql -h127.0.0.1 -uroot -pdb-root-password torrust_v1 < ./db_migrate/db_schemas/db_migrations_v1_for_mysql_8.sql
+mysql -h127.0.0.1 -uroot -pdb-root-password torrust_v1 < ./upgrades/from_v1_0_0_to_v2_0_0/db_schemas/db_migrations_v1_for_mysql_8.sql
 ```
 
 Using dockerized `mysql` client:
 
 ```s
-docker exec -i torrust-index-backend-mysql mysql torrust_v1 -uroot -pdb-root-password < ./db_migrate/db_schemas/db_migrations_v1_for_mysql_8.sql
+docker exec -i torrust-index-backend-mysql mysql torrust_v1 -uroot -pdb-root-password < ./upgrades/from_v1_0_0_to_v2_0_0/db_schemas/db_migrations_v1_for_mysql_8.sql
 ```
 
 ### Commands
 
 Connect to `mysql` client:
 
 ```s
 mysql -h127.0.0.1 -uroot -pdb-root-password torrust_v1
 ```
 
 Connect to dockerized `mysql` client:
 
 ```s
 docker exec -it torrust-index-backend-mysql mysql torrust_v1 -uroot -pdb-root-password
 ```
 
 Backup DB:
 
 ```s
-mysqldump -h127.0.0.1 torrust_v1 -uroot -pdb-root-password > ./db_migrate/db_schemas/v1_schema_dump.sql
-mysqldump -h127.0.0.1 torrust_v2 -uroot -pdb-root-password > ./db_migrate/db_schemas/v2_schema_dump.sql
+mysqldump -h127.0.0.1 torrust_v1 -uroot -pdb-root-password > ./upgrades/from_v1_0_0_to_v2_0_0/db_schemas/v1_schema_dump.sql
+mysqldump -h127.0.0.1 torrust_v2 -uroot -pdb-root-password > ./upgrades/from_v1_0_0_to_v2_0_0/db_schemas/v2_schema_dump.sql
 ```
diff --git a/db_migrate/db_schemas/mysql/db_migrations_v2.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/mysql/db_migrations_v2.sql
similarity index 100%
rename from db_migrate/db_schemas/mysql/db_migrations_v2.sql
rename to upgrades/from_v1_0_0_to_v2_0_0/db_schemas/mysql/db_migrations_v2.sql
diff --git a/db_migrate/db_schemas/sqlite3/db_migrations_v1.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v1.sql
similarity index 100%
rename from db_migrate/db_schemas/sqlite3/db_migrations_v1.sql
rename to upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v1.sql
diff --git a/db_migrate/db_schemas/sqlite3/db_migrations_v2.sql b/upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v2.sql
similarity index 100%
rename from db_migrate/db_schemas/sqlite3/db_migrations_v2.sql
rename to upgrades/from_v1_0_0_to_v2_0_0/db_schemas/sqlite3/db_migrations_v2.sql
diff --git a/db_migrate/docker/start_mysql.sh b/upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql.sh
similarity index 100%
rename from db_migrate/docker/start_mysql.sh
rename to upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql.sh
diff --git a/db_migrate/docker/start_mysql_client.sh b/upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql_client.sh
similarity index 100%
rename from db_migrate/docker/start_mysql_client.sh
rename to upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql_client.sh
diff --git a/db_migrate/docker/stop_mysql.sh b/upgrades/from_v1_0_0_to_v2_0_0/docker/stop_mysql.sh
similarity index 100%
rename from db_migrate/docker/stop_mysql.sh
rename to upgrades/from_v1_0_0_to_v2_0_0/docker/stop_mysql.sh

From d59097222703ccfe3d88941acc6da6a994ae6091 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Mon, 31 Oct 2022 13:59:44 +0000
Subject: [PATCH 06/53] refactor: [#56] move upgrader from main upgrade mod to
 specific version upgrader mod

---
 src/bin/upgrade.rs                            | 82 ++-----------------
 src/databases/mod.rs                          |  2 -
 src/lib.rs                                    |  1 +
 .../from_v1_0_0_to_v2_0_0/databases/mod.rs    |  2 +
 .../databases/sqlite_v1_0_0.rs                | 13 +--
 .../databases/sqlite_v2_0_0.rs                | 10 ++-
 src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs     |  2 +
 .../from_v1_0_0_to_v2_0_0/upgrader.rs         | 79 ++++++++++++++++++
 src/upgrades/mod.rs                           |  1 +
 9 files changed, 105 insertions(+), 87 deletions(-)
 create mode 100644
src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs rename src/{ => upgrades/from_v1_0_0_to_v2_0_0}/databases/sqlite_v1_0_0.rs (71%) rename src/{ => upgrades/from_v1_0_0_to_v2_0_0}/databases/sqlite_v2_0_0.rs (94%) create mode 100644 src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs create mode 100644 src/upgrades/mod.rs diff --git a/src/bin/upgrade.rs b/src/bin/upgrade.rs index 563bebdb..15350d1d 100644 --- a/src/bin/upgrade.rs +++ b/src/bin/upgrade.rs @@ -1,82 +1,10 @@ -//! Migration command to migrate data from v1.0.0 to v2.0.0 -//! Run it with `cargo run --bin upgrade` +//! Upgrade command. +//! It updates the application from version v1.0.0 to v2.0.0. +//! You can execute it with: `cargo run --bin upgrade` -use std::sync::Arc; -use torrust_index_backend::config::Configuration; -use torrust_index_backend::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; -use torrust_index_backend::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; - -async fn current_db() -> Arc { - // Connect to the old v1.0.0 DB - let cfg = match Configuration::load_from_file().await { - Ok(config) => Arc::new(config), - Err(error) => { - panic!("{}", error) - } - }; - - let settings = cfg.settings.read().await; - - Arc::new(SqliteDatabaseV1_0_0::new(&settings.database.connect_url).await) -} - -async fn new_db(db_filename: String) -> Arc { - let dest_database_connect_url = format!("sqlite://{}?mode=rwc", db_filename); - Arc::new(SqliteDatabaseV2_0_0::new(&dest_database_connect_url).await) -} - -async fn reset_destiny_database(dest_database: Arc) { - println!("Truncating all tables in destiny database ..."); - dest_database - .delete_all_database_rows() - .await - .expect("Can't reset destiny database."); -} - -async fn transfer_categories( - source_database: Arc, - dest_database: Arc, -) { - let source_categories = source_database.get_categories_order_by_id().await.unwrap(); - println!("[v1] categories: {:?}", &source_categories); - - let result = dest_database.reset_categories_sequence().await.unwrap(); - println!("result {:?}", result); - - for cat in &source_categories { - println!( - "[v2] adding category: {:?} {:?} ...", - &cat.category_id, &cat.name - ); - let id = dest_database - .insert_category_and_get_id(&cat.name) - .await - .unwrap(); - - if id != cat.category_id { - panic!( - "Error copying category {:?} from source DB to destiny DB", - &cat.category_id - ); - } - - println!("[v2] category: {:?} {:?} added.", id, &cat.name); - } - - let dest_categories = dest_database.get_categories().await.unwrap(); - println!("[v2] categories: {:?}", &dest_categories); -} +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::upgrade; #[actix_web::main] async fn main() { - // Get connections to source adn destiny databases - let source_database = current_db().await; - let dest_database = new_db("data_v2.db".to_string()).await; - - println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); - - reset_destiny_database(dest_database.clone()).await; - transfer_categories(source_database.clone(), dest_database.clone()).await; - - // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. 
+ upgrade().await; } diff --git a/src/databases/mod.rs b/src/databases/mod.rs index c15a2b72..169d99f4 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -1,5 +1,3 @@ pub mod database; pub mod mysql; pub mod sqlite; -pub mod sqlite_v1_0_0; -pub mod sqlite_v2_0_0; diff --git a/src/lib.rs b/src/lib.rs index 5a0100c3..d7ef0d09 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,6 +7,7 @@ pub mod mailer; pub mod models; pub mod routes; pub mod tracker; +pub mod upgrades; pub mod utils; trait AsCSV { diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs new file mode 100644 index 00000000..fa37d81b --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs @@ -0,0 +1,2 @@ +pub mod sqlite_v1_0_0; +pub mod sqlite_v2_0_0; diff --git a/src/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs similarity index 71% rename from src/databases/sqlite_v1_0_0.rs rename to src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs index 10420128..a7351479 100644 --- a/src/databases/sqlite_v1_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -1,8 +1,9 @@ -use super::database::DatabaseError; use serde::{Deserialize, Serialize}; use sqlx::sqlite::SqlitePoolOptions; use sqlx::{query_as, SqlitePool}; +use crate::databases::database::DatabaseError; + #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] pub struct Category { pub category_id: i64, @@ -22,9 +23,11 @@ impl SqliteDatabaseV1_0_0 { } pub async fn get_categories_order_by_id(&self) -> Result, DatabaseError> { - query_as::<_, Category>("SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC") - .fetch_all(&self.pool) - .await - .map_err(|_| DatabaseError::Error) + query_as::<_, Category>( + "SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC", + ) + .fetch_all(&self.pool) + .await + .map_err(|_| DatabaseError::Error) } } diff --git a/src/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs similarity index 94% rename from src/databases/sqlite_v2_0_0.rs rename to src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 0a1efe33..8dce7584 100644 --- a/src/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -1,8 +1,9 @@ -use super::database::DatabaseError; use serde::{Deserialize, Serialize}; use sqlx::sqlite::{SqlitePoolOptions, SqliteQueryResult}; use sqlx::{query, query_as, SqlitePool}; +use crate::databases::database::DatabaseError; + #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] pub struct Category { pub category_id: i64, @@ -35,7 +36,10 @@ impl SqliteDatabaseV2_0_0 { .map_err(|_| DatabaseError::Error) } - pub async fn insert_category_and_get_id(&self, category_name: &str) -> Result { + pub async fn insert_category_and_get_id( + &self, + category_name: &str, + ) -> Result { query("INSERT INTO torrust_categories (name) VALUES (?)") .bind(category_name) .execute(&self.pool) @@ -51,7 +55,7 @@ impl SqliteDatabaseV2_0_0 { } _ => DatabaseError::Error, }) - } + } pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { query("DELETE FROM torrust_categories;") diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs new file mode 100644 index 00000000..ef4843d0 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -0,0 +1,2 @@ +pub mod upgrader; +pub mod databases; \ No newline at end of file 
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index e69de29b..1be682cd 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -0,0 +1,79 @@ +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use std::sync::Arc; + +use crate::config::Configuration; + +async fn current_db() -> Arc { + // Connect to the old v1.0.0 DB + let cfg = match Configuration::load_from_file().await { + Ok(config) => Arc::new(config), + Err(error) => { + panic!("{}", error) + } + }; + + let settings = cfg.settings.read().await; + + Arc::new(SqliteDatabaseV1_0_0::new(&settings.database.connect_url).await) +} + +async fn new_db(db_filename: String) -> Arc { + let dest_database_connect_url = format!("sqlite://{}?mode=rwc", db_filename); + Arc::new(SqliteDatabaseV2_0_0::new(&dest_database_connect_url).await) +} + +async fn reset_destiny_database(dest_database: Arc) { + println!("Truncating all tables in destiny database ..."); + dest_database + .delete_all_database_rows() + .await + .expect("Can't reset destiny database."); +} + +async fn transfer_categories( + source_database: Arc, + dest_database: Arc, +) { + let source_categories = source_database.get_categories_order_by_id().await.unwrap(); + println!("[v1] categories: {:?}", &source_categories); + + let result = dest_database.reset_categories_sequence().await.unwrap(); + println!("result {:?}", result); + + for cat in &source_categories { + println!( + "[v2] adding category: {:?} {:?} ...", + &cat.category_id, &cat.name + ); + let id = dest_database + .insert_category_and_get_id(&cat.name) + .await + .unwrap(); + + if id != cat.category_id { + panic!( + "Error copying category {:?} from source DB to destiny DB", + &cat.category_id + ); + } + + println!("[v2] category: {:?} {:?} added.", id, &cat.name); + } + + let dest_categories = dest_database.get_categories().await.unwrap(); + println!("[v2] categories: {:?}", &dest_categories); +} + +pub async fn upgrade() { + // Get connections to source adn destiny databases + let source_database = current_db().await; + let dest_database = new_db("data_v2.db".to_string()).await; + + println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); + + reset_destiny_database(dest_database.clone()).await; + transfer_categories(source_database.clone(), dest_database.clone()).await; + + // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. 
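+    // For reference, the five v1 tables (see db_migrations_v1.sql above) are:
+    // torrust_users, torrust_tracker_keys, torrust_categories,
+    // torrust_torrent_files and torrust_torrents.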
+}
diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs
new file mode 100644
index 00000000..736d54f6
--- /dev/null
+++ b/src/upgrades/mod.rs
@@ -0,0 +1 @@
+pub mod from_v1_0_0_to_v2_0_0;
\ No newline at end of file

From d9b4e871ed4701eadef8f5d386b090e746699e19 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Mon, 31 Oct 2022 16:55:20 +0000
Subject: [PATCH 07/53] feat: [#56] transfer user data from v1.0.0 to v2.0.0

TODO: transfer password

---
 .../databases/sqlite_v1_0_0.rs        | 19 +++++
 .../databases/sqlite_v2_0_0.rs        | 38 ++++++++++
 .../from_v1_0_0_to_v2_0_0/upgrader.rs | 76 ++++++++++++++++++-
 3 files changed, 132 insertions(+), 1 deletion(-)

diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs
index a7351479..b38957fd 100644
--- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs
@@ -9,6 +9,17 @@ pub struct Category {
     pub category_id: i64,
     pub name: String,
 }
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct User {
+    pub user_id: i64,
+    pub username: String,
+    pub email: String,
+    pub email_verified: bool,
+    pub password: String,
+    pub administrator: bool,
+}
+
 pub struct SqliteDatabaseV1_0_0 {
     pub pool: SqlitePool,
 }
@@ -30,4 +41,12 @@ impl SqliteDatabaseV1_0_0 {
         .await
         .map_err(|_| DatabaseError::Error)
     }
+
+    pub async fn get_users(&self) -> Result<Vec<User>, sqlx::Error> {
+        query_as::<_, User>(
+            "SELECT * FROM torrust_users ORDER BY user_id ASC",
+        )
+        .fetch_all(&self.pool)
+        .await
+    }
 }
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs
index 8dce7584..5aa83fde 100644
--- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs
@@ -57,6 +57,44 @@ impl SqliteDatabaseV2_0_0 {
             })
     }
 
+    pub async fn insert_user(
+        &self,
+        user_id: i64,
+        date_registered: &str,
+        administrator: bool,
+    ) -> Result<i64, sqlx::Error> {
+        query(
+            "INSERT INTO torrust_users (user_id, date_registered, administrator) VALUES (?, ?, ?)",
+        )
+        .bind(user_id)
+        .bind(date_registered)
+        .bind(administrator)
+        .execute(&self.pool)
+        .await
+        .map(|v| v.last_insert_rowid())
+    }
+
+    pub async fn insert_user_profile(
+        &self,
+        user_id: i64,
+        username: &str,
+        email: &str,
+        email_verified: bool,
+        bio: &str,
+        avatar: &str,
+    ) -> Result<i64, sqlx::Error> {
+        query("INSERT INTO torrust_user_profiles (user_id, username, email, email_verified, bio, avatar) VALUES (?, ?, ?, ?, ?, ?)")
+            .bind(user_id)
+            .bind(username)
+            .bind(email)
+            .bind(email_verified)
+            .bind(bio)
+            .bind(avatar)
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
+    }
+
     pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> {
         query("DELETE FROM torrust_categories;")
             .execute(&self.pool)
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
index 1be682cd..29ad2a85 100644
--- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
@@ -1,9 +1,24 @@
+//! It updates the application from version v1.0.0 to v2.0.0.
+//!
+//! NOTES for `torrust_users` table transfer:
+//!
+//! - In v2, the table `torrust_users` contains a field `date_registered` that does not exist in v1.
+//!   The day on which the upgrade command is executed is used as its value.
+//! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`.
+//!   The empty string is used as the default value.
Empty string is used as default value. + use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; -use std::sync::Arc; +use chrono::prelude::{DateTime, Utc}; +use std::{sync::Arc, time::SystemTime}; use crate::config::Configuration; +fn today_iso8601() -> String { + let dt: DateTime = SystemTime::now().into(); + format!("{}", dt.format("%Y-%m-%d")) +} + async fn current_db() -> Arc { // Connect to the old v1.0.0 DB let cfg = match Configuration::load_from_file().await { @@ -75,5 +90,64 @@ pub async fn upgrade() { reset_destiny_database(dest_database.clone()).await; transfer_categories(source_database.clone(), dest_database.clone()).await; + // Transfer `torrust_users` + + let users = source_database.get_users().await.unwrap(); + + for user in &users { + // [v2] table torrust_users + + println!( + "[v2][torrust_users] adding user: {:?} {:?} ...", + &user.user_id, &user.username + ); + + let default_data_registered = today_iso8601(); + + let id = dest_database + .insert_user(user.user_id, &default_data_registered, user.administrator) + .await + .unwrap(); + + if id != user.user_id { + panic!( + "Error copying user {:?} from source DB to destiny DB", + &user.user_id + ); + } + + println!( + "[v2][torrust_users] user: {:?} {:?} added.", + &user.user_id, &user.username + ); + + // [v2] table torrust_user_profiles + + println!( + "[v2][torrust_user_profiles] adding user: {:?} {:?} ...", + &user.user_id, &user.username + ); + + let default_user_bio = "".to_string(); + let default_user_avatar = "".to_string(); + + dest_database + .insert_user_profile( + user.user_id, + &user.username, + &user.email, + user.email_verified, + &default_user_bio, + &default_user_avatar, + ) + .await + .unwrap(); + + println!( + "[v2][torrust_user_profiles] user: {:?} {:?} added.", + &user.user_id, &user.username + ); + } + // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. } From cf092835863780c411881479d3350586c36a1ba0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 2 Nov 2022 10:37:49 +0000 Subject: [PATCH 08/53] docs: [#56] update README for integration tests --- tests/README.md | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/tests/README.md b/tests/README.md index 81e9d18a..2cad69c7 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,10 +1,21 @@ -### Running Tests -Torrust requires Docker to run different database systems for testing. [install docker here](https://docs.docker.com/engine/). +# Running Tests + +Torrust requires Docker to run different database systems for testing. [Install docker here](https://docs.docker.com/engine/). Start the databases with `docker-compose` before running tests: - $ docker-compose up +```s +docker-compose -f tests/docker-compose.yml up +``` Run all tests using: - $ cargo test +```s +cargo test +``` + +Connect to the DB using MySQL client: + +```s +mysql -h127.0.0.1 -uroot -ppassword torrust-index_test +``` From 01921edfb8a10318857f5e7aa7a88a18111cb48e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 2 Nov 2022 10:39:32 +0000 Subject: [PATCH 09/53] fix: [#56] triggering recompilation on migration changes Migrations were not executed while running integration tests. After adding a new migration without changing any Rust code. 
More info: https://docs.rs/sqlx/latest/sqlx/macro.migrate.html#triggering-recompilation-on-migration-changes --- build.rs | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 build.rs diff --git a/build.rs b/build.rs new file mode 100644 index 00000000..76095938 --- /dev/null +++ b/build.rs @@ -0,0 +1,5 @@ +// generated by `sqlx migrate build-script` +fn main() { + // trigger recompilation when a new migration is added + println!("cargo:rerun-if-changed=migrations"); +} \ No newline at end of file From d9b4e871ed4701eadef8f5d386b090e746699e19 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 2 Nov 2022 11:00:39 +0000 Subject: [PATCH 10/53] feat: [#56] transfer user password from v1.0.0 to v2.0.0 We transfer the field `torrust_users.password` to `torrust_user_authentication.password_hash`. The hash value is using the PHC string format since v.1.0.0. In v1.0.0 we were using the hash function "pbkdf2-sha256". In v2.0.0 we are using "argon2id". The packages we use to verify password allow using different hash functions. So we only had to use a different algorithm depending on the hash id in the PHC string. --- Cargo.lock | 22 ++++++++ Cargo.toml | 1 + src/models/user.rs | 2 +- src/routes/user.rs | 37 +++++++++---- .../databases/sqlite_v2_0_0.rs | 20 +++++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 53 +++++++++++++++---- 6 files changed, 113 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7a6994e..e65c2f14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -941,6 +941,15 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.3", +] + [[package]] name = "home" version = "0.5.3" @@ -1545,6 +1554,18 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.3", + "hmac", + "password-hash", + "sha2", +] + [[package]] name = "pem" version = "1.1.0" @@ -2577,6 +2598,7 @@ dependencies = [ "futures", "jsonwebtoken", "lettre", + "pbkdf2", "rand_core", "regex", "reqwest", diff --git a/Cargo.toml b/Cargo.toml index d89251ac..60206325 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,3 +34,4 @@ tokio = {version = "1.13", features = ["macros", "io-util", "net", "time", "rt-m lettre = { version = "0.10.0-rc.3", features = ["builder", "tokio1", "tokio1-rustls-tls", "smtp-transport"]} sailfish = "0.4.0" regex = "1.6.0" +pbkdf2 = "0.11.0" diff --git a/src/models/user.rs b/src/models/user.rs index f64b88b4..fdf86f76 100644 --- a/src/models/user.rs +++ b/src/models/user.rs @@ -10,7 +10,7 @@ pub struct User { #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct UserAuthentication { pub user_id: i64, - pub password_hash: String, + pub password_hash: String } #[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone, sqlx::FromRow)] diff --git a/src/routes/user.rs b/src/routes/user.rs index 6b535bc6..9195be7a 100644 --- a/src/routes/user.rs +++ b/src/routes/user.rs @@ -2,6 +2,7 
@@ use actix_web::{web, HttpRequest, HttpResponse, Responder}; use argon2::password_hash::SaltString; use argon2::{Argon2, PasswordHash, PasswordHasher, PasswordVerifier}; use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation}; +use pbkdf2::Pbkdf2; use rand_core::OsRng; use serde::{Deserialize, Serialize}; @@ -10,6 +11,7 @@ use crate::config::EmailOnSignup; use crate::errors::{ServiceError, ServiceResult}; use crate::mailer::VerifyClaims; use crate::models::response::{OkResponse, TokenResponse}; +use crate::models::user::UserAuthentication; use crate::utils::regex::validate_email_address; use crate::utils::time::current_time; @@ -139,16 +141,7 @@ pub async fn login(payload: web::Json, app_data: WebAppData) -> ServiceRe .await .map_err(|_| ServiceError::InternalServerError)?; - // wrap string of the hashed password into a PasswordHash struct for verification - let parsed_hash = PasswordHash::new(&user_authentication.password_hash)?; - - // verify if the user supplied and the database supplied passwords match - if Argon2::default() - .verify_password(payload.password.as_bytes(), &parsed_hash) - .is_err() - { - return Err(ServiceError::WrongPasswordOrUsername); - } + verify_password(payload.password.as_bytes(), &user_authentication)?; let settings = app_data.cfg.settings.read().await; @@ -174,6 +167,30 @@ pub async fn login(payload: web::Json, app_data: WebAppData) -> ServiceRe })) } +/// Verify if the user supplied and the database supplied passwords match +pub fn verify_password(password: &[u8], user_authentication: &UserAuthentication) -> Result<(), ServiceError> { + // wrap string of the hashed password into a PasswordHash struct for verification + let parsed_hash = PasswordHash::new(&user_authentication.password_hash)?; + + match parsed_hash.algorithm.as_str() { + "argon2id" => { + if Argon2::default().verify_password(password, &parsed_hash).is_err() { + return Err(ServiceError::WrongPasswordOrUsername); + } + + Ok(()) + } + "pbkdf2-sha256" => { + if Pbkdf2.verify_password(password, &parsed_hash).is_err() { + return Err(ServiceError::WrongPasswordOrUsername); + } + + Ok(()) + } + _ => Err(ServiceError::WrongPasswordOrUsername), + } +} + pub async fn verify_token(payload: web::Json, app_data: WebAppData) -> ServiceResult { // verify if token is valid let _claims = app_data.auth.verify_jwt(&payload.token).await?; diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 5aa83fde..b1eb97e3 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -22,6 +22,13 @@ impl SqliteDatabaseV2_0_0 { Self { pool: db } } + pub async fn migrate(&self) { + sqlx::migrate!("migrations/sqlite3") + .run(&self.pool) + .await + .expect("Could not run database migrations.") + } + pub async fn reset_categories_sequence(&self) -> Result { query("DELETE FROM `sqlite_sequence` WHERE `name` = 'torrust_categories'") .execute(&self.pool) @@ -95,6 +102,19 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } + pub async fn insert_user_password_hash( + &self, + user_id: i64, + password_hash: &str, + ) -> Result { + query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") + .bind(user_id) + .bind(password_hash) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { query("DELETE FROM torrust_categories;") 
.execute(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 29ad2a85..4d250188 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -7,8 +7,10 @@ //! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. //! Empty string is used as default value. -use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::{ + upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0, +}; use chrono::prelude::{DateTime, Utc}; use std::{sync::Arc, time::SystemTime}; @@ -38,6 +40,11 @@ async fn new_db(db_filename: String) -> Arc { Arc::new(SqliteDatabaseV2_0_0::new(&dest_database_connect_url).await) } +async fn migrate_destiny_database(dest_database: Arc) { + println!("Running migrations ..."); + dest_database.migrate().await; +} + async fn reset_destiny_database(dest_database: Arc) { println!("Truncating all tables in destiny database ..."); dest_database @@ -80,16 +87,10 @@ async fn transfer_categories( println!("[v2] categories: {:?}", &dest_categories); } -pub async fn upgrade() { - // Get connections to source adn destiny databases - let source_database = current_db().await; - let dest_database = new_db("data_v2.db".to_string()).await; - - println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); - - reset_destiny_database(dest_database.clone()).await; - transfer_categories(source_database.clone(), dest_database.clone()).await; - +async fn transfer_user_data( + source_database: Arc, + dest_database: Arc, +) { // Transfer `torrust_users` let users = source_database.get_users().await.unwrap(); @@ -147,7 +148,37 @@ pub async fn upgrade() { "[v2][torrust_user_profiles] user: {:?} {:?} added.", &user.user_id, &user.username ); + + // [v2] table torrust_user_authentication + + println!( + "[v2][torrust_user_authentication] adding password hash ({:?}) for user ({:?}) ...", + &user.password, &user.user_id + ); + + dest_database + .insert_user_password_hash(user.user_id, &user.password) + .await + .unwrap(); + + println!( + "[v2][torrust_user_authentication] password hash ({:?}) added for user ({:?}).", + &user.password, &user.user_id + ); } +} + +pub async fn upgrade() { + // Get connections to source adn destiny databases + let source_database = current_db().await; + let dest_database = new_db("data_v2.db".to_string()).await; + + println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); + + migrate_destiny_database(dest_database.clone()).await; + reset_destiny_database(dest_database.clone()).await; + transfer_categories(source_database.clone(), dest_database.clone()).await; + transfer_user_data(source_database.clone(), dest_database.clone()).await; // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. 
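
A note on why the password transfer above can work at all: a PHC string embeds the identifier of the hash function that produced it, so the login code can pick the right verifier per user. A dependency-free sketch of that dispatch follows; the two example strings are made up and are not valid hashes.

```rust
// PHC format: $<algorithm-id>$<params>$<salt>$<hash>
fn phc_algorithm(phc: &str) -> Option<&str> {
    phc.strip_prefix('$')?.split('$').next()
}

fn main() {
    // Made-up examples: a v1.0.0-style hash and a v2.0.0-style hash.
    let hashes = [
        "$pbkdf2-sha256$i=10000,l=32$c2FsdA$aGFzaA",
        "$argon2id$v=19$m=4096,t=3,p=1$c2FsdA$aGFzaA",
    ];

    for phc in hashes {
        match phc_algorithm(phc) {
            Some("argon2id") => println!("argon2id: verify with Argon2"),
            Some("pbkdf2-sha256") => println!("pbkdf2-sha256: verify with Pbkdf2"),
            other => println!("unsupported algorithm id: {other:?}"),
        }
    }
}
```

This is the reason the migrated `password_hash` values can be copied verbatim: old pbkdf2-sha256 users keep logging in with their old hashes, and are simply verified by a different backend than new argon2id users.
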
} From dd949fa91dce3daf1a480ca4fa320821ea40e551 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 3 Nov 2022 13:17:58 +0000 Subject: [PATCH 11/53] feat: [#56] transfer tracker keys from v1.0.0 to v2.0.0 --- .../databases/sqlite_v1_0_0.rs | 22 ++++++-- .../databases/sqlite_v2_0_0.rs | 17 ++++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 55 +++++++++++++++++-- 3 files changed, 84 insertions(+), 10 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs index b38957fd..584c6776 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -20,6 +20,14 @@ pub struct User { pub administrator: bool, } +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TrackerKey { + pub key_id: i64, + pub user_id: i64, + pub key: String, + pub valid_until: i64, +} + pub struct SqliteDatabaseV1_0_0 { pub pool: SqlitePool, } @@ -43,10 +51,14 @@ impl SqliteDatabaseV1_0_0 { } pub async fn get_users(&self) -> Result, sqlx::Error> { - query_as::<_, User>( - "SELECT * FROM torrust_users ORDER BY user_id ASC", - ) - .fetch_all(&self.pool) - .await + query_as::<_, User>("SELECT * FROM torrust_users ORDER BY user_id ASC") + .fetch_all(&self.pool) + .await + } + + pub async fn get_tracker_keys(&self) -> Result, sqlx::Error> { + query_as::<_, TrackerKey>("SELECT * FROM torrust_tracker_keys ORDER BY key_id ASC") + .fetch_all(&self.pool) + .await } } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index b1eb97e3..e16b4571 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -115,6 +115,23 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } + pub async fn insert_tracker_key( + &self, + tracker_key_id: i64, + user_id: i64, + tracker_key: &str, + date_expiry: i64, + ) -> Result { + query("INSERT INTO torrust_tracker_keys (tracker_key_id, user_id, tracker_key, date_expiry) VALUES (?, ?, ?, ?)") + .bind(tracker_key_id) + .bind(user_id) + .bind(tracker_key) + .bind(date_expiry) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { query("DELETE FROM torrust_categories;") .execute(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 4d250188..04500666 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -7,10 +7,8 @@ //! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. //! Empty string is used as default value. 
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; -use crate::{ - upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0, -}; use chrono::prelude::{DateTime, Utc}; use std::{sync::Arc, time::SystemTime}; @@ -41,7 +39,7 @@ async fn new_db(db_filename: String) -> Arc { } async fn migrate_destiny_database(dest_database: Arc) { - println!("Running migrations ..."); + println!("Running migrations in destiny database..."); dest_database.migrate().await; } @@ -57,6 +55,8 @@ async fn transfer_categories( source_database: Arc, dest_database: Arc, ) { + println!("Transferring categories ..."); + let source_categories = source_database.get_categories_order_by_id().await.unwrap(); println!("[v1] categories: {:?}", &source_categories); @@ -91,7 +91,9 @@ async fn transfer_user_data( source_database: Arc, dest_database: Arc, ) { - // Transfer `torrust_users` + println!("Transferring users ..."); + + // Transfer table `torrust_users` let users = source_database.get_users().await.unwrap(); @@ -168,6 +170,48 @@ async fn transfer_user_data( } } +async fn transfer_tracker_keys( + source_database: Arc, + dest_database: Arc, +) { + println!("Transferring tracker keys ..."); + + // Transfer table `torrust_tracker_keys` + + let tracker_keys = source_database.get_tracker_keys().await.unwrap(); + + for tracker_key in &tracker_keys { + // [v2] table torrust_tracker_keys + + println!( + "[v2][torrust_tracker_keys] adding the tracker key: {:?} ...", + &tracker_key.key_id + ); + + let id = dest_database + .insert_tracker_key( + tracker_key.key_id, + tracker_key.user_id, + &tracker_key.key, + tracker_key.valid_until, + ) + .await + .unwrap(); + + if id != tracker_key.key_id { + panic!( + "Error copying tracker key {:?} from source DB to destiny DB", + &tracker_key.key_id + ); + } + + println!( + "[v2][torrust_tracker_keys] tracker key: {:?} added.", + &tracker_key.key_id + ); + } +} + pub async fn upgrade() { // Get connections to source adn destiny databases let source_database = current_db().await; @@ -179,6 +223,7 @@ pub async fn upgrade() { reset_destiny_database(dest_database.clone()).await; transfer_categories(source_database.clone(), dest_database.clone()).await; transfer_user_data(source_database.clone(), dest_database.clone()).await; + transfer_tracker_keys(source_database.clone(), dest_database.clone()).await; // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. } From 35f1e371b5d2ceb20fe20d0c7f027e3b6df216d9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 3 Nov 2022 13:27:52 +0000 Subject: [PATCH 12/53] fix: [#56] default user registration date with time When we import users from previous versions where the app did not store the registration date, we assign the current datetime for the registration.
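
The fix in this patch swaps the `%Y-%m-%d` format for `%Y-%m-%d %H:%M:%S`. A stand-alone version of the resulting helper, runnable with the `chrono` crate (the printed value naturally depends on the clock):

```rust
use std::time::SystemTime;

use chrono::{DateTime, Utc};

fn today_iso8601() -> String {
    let dt: DateTime<Utc> = SystemTime::now().into();
    dt.format("%Y-%m-%d %H:%M:%S").to_string()
}

fn main() {
    // Prints something like "2022-11-03 13:27:52"; with the old "%Y-%m-%d"
    // format, every user imported on the same day got exactly the same value.
    println!("{}", today_iso8601());
}
```
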
--- src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 04500666..5d6b733c 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -16,7 +16,7 @@ use crate::config::Configuration; fn today_iso8601() -> String { let dt: DateTime = SystemTime::now().into(); - format!("{}", dt.format("%Y-%m-%d")) + format!("{}", dt.format("%Y-%m-%d %H:%M:%S")) } async fn current_db() -> Arc { From 8d26faa76ab11dbebcd6e25ee5a4b7414592dfb1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 4 Nov 2022 10:27:42 +0000 Subject: [PATCH 13/53] fix: [#78] parsing keys from tracker --- src/models/tracker_key.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/models/tracker_key.rs b/src/models/tracker_key.rs index 71bf51c3..15e23622 100644 --- a/src/models/tracker_key.rs +++ b/src/models/tracker_key.rs @@ -6,3 +6,15 @@ pub struct TrackerKey { pub key: String, pub valid_until: i64, } + +#[derive(Debug, Serialize, Deserialize)] +pub struct NewTrackerKey { + pub key: String, + pub valid_until: Duration, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Duration { + pub secs: i64, + pub nanos: i64, +} \ No newline at end of file From 0b3aefaa63ce14c3614581b527c24a153a7bedf5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 4 Nov 2022 18:11:32 +0000 Subject: [PATCH 14/53] feat: [#56] transfer torrents (1/4 tables) from v1.0.0 to v2.0.0 Transferred data to tables in versio v2.0.0: - [x] Table `torrust_torrents` - [ ] Table `torrust_torrent_files` - [ ] Table `torrust_torrent_announce_urls` - [ ] Table `torrust_torrent_info` --- src/models/torrent_file.rs | 18 ++ .../databases/sqlite_v1_0_0.rs | 42 +++++ .../databases/sqlite_v2_0_0.rs | 46 +++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 167 +++++++++++++++--- 4 files changed, 253 insertions(+), 20 deletions(-) diff --git a/src/models/torrent_file.rs b/src/models/torrent_file.rs index 581d73a8..c7ab26a7 100644 --- a/src/models/torrent_file.rs +++ b/src/models/torrent_file.rs @@ -39,6 +39,24 @@ pub struct TorrentInfo { pub root_hash: Option, } +impl TorrentInfo { + /// torrent file can only hold a pieces key or a root hash key: + /// http://www.bittorrent.org/beps/bep_0030.html + pub fn get_pieces_as_string(&self) -> String { + match &self.pieces { + None => "".to_string(), + Some(byte_buf) => bytes_to_hex(byte_buf.as_ref()) + } + } + + pub fn get_root_hash_as_i64(&self) -> i64 { + match &self.root_hash { + None => 0i64, + Some(root_hash) => root_hash.parse::().unwrap() + } + } +} + #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] pub struct Torrent { pub info: TorrentInfo, // diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs index 584c6776..3f784db1 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -28,6 +28,29 @@ pub struct TrackerKey { pub valid_until: i64, } +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct Torrent { + pub torrent_id: i64, + pub uploader: String, + pub info_hash: String, + pub title: String, + pub category_id: i64, + pub description: String, + pub upload_date: i64, + pub file_size: i64, + pub seeders: i64, + pub leechers: i64, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub 
struct TorrentFile { + pub file_id: i64, + pub torrent_uid: i64, + pub number: i64, + pub path: String, + pub length: i64, +} + pub struct SqliteDatabaseV1_0_0 { pub pool: SqlitePool, } @@ -56,9 +79,28 @@ impl SqliteDatabaseV1_0_0 { .await } + pub async fn get_user_by_username(&self, username: &str) -> Result { + query_as::<_, User>("SELECT * FROM torrust_users WHERE username = ?") + .bind(username) + .fetch_one(&self.pool) + .await + } + pub async fn get_tracker_keys(&self) -> Result, sqlx::Error> { query_as::<_, TrackerKey>("SELECT * FROM torrust_tracker_keys ORDER BY key_id ASC") .fetch_all(&self.pool) .await } + + pub async fn get_torrents(&self) -> Result, sqlx::Error> { + query_as::<_, Torrent>("SELECT * FROM torrust_torrents ORDER BY torrent_id ASC") + .fetch_all(&self.pool) + .await + } + + pub async fn get_torrent_files(&self) -> Result, sqlx::Error> { + query_as::<_, TorrentFile>("SELECT * FROM torrust_torrent_files ORDER BY file_id ASC") + .fetch_all(&self.pool) + .await + } } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index e16b4571..04c04216 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -132,6 +132,52 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } + pub async fn insert_torrent( + &self, + torrent_id: i64, + uploader_id: i64, + category_id: i64, + info_hash: &str, + size: i64, + name: &str, + pieces: &str, + piece_length: i64, + private: bool, + root_hash: i64, + date_uploaded: &str, + ) -> Result { + query( + " + INSERT INTO torrust_torrents ( + torrent_id, + uploader_id, + category_id, + info_hash, + size, + name, + pieces, + piece_length, + private, + root_hash, + date_uploaded + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + ) + .bind(torrent_id) + .bind(uploader_id) + .bind(category_id) + .bind(info_hash) + .bind(size) + .bind(name) + .bind(pieces) + .bind(piece_length) + .bind(private) + .bind(root_hash) + .bind(date_uploaded) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { query("DELETE FROM torrust_categories;") .execute(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 5d6b733c..b6f23e76 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -7,20 +7,25 @@ //! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. //! Empty string is used as default value. 
-use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::utils::parse_torrent::decode_torrent; +use crate::{ + models::torrent_file::Torrent, + upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0, +}; use chrono::prelude::{DateTime, Utc}; +use chrono::NaiveDateTime; + +use std::{error, fs}; use std::{sync::Arc, time::SystemTime}; use crate::config::Configuration; -fn today_iso8601() -> String { - let dt: DateTime = SystemTime::now().into(); - format!("{}", dt.format("%Y-%m-%d %H:%M:%S")) -} +pub async fn upgrade() { + // TODO: get from command arguments + let database_file = "data_v2.db".to_string(); // The new database + let upload_path = "./uploads".to_string(); // The relative dir where torrent files are stored -async fn current_db() -> Arc { - // Connect to the old v1.0.0 DB let cfg = match Configuration::load_from_file().await { Ok(config) => Arc::new(config), Err(error) => { @@ -30,10 +35,29 @@ async fn current_db() -> Arc { let settings = cfg.settings.read().await; - Arc::new(SqliteDatabaseV1_0_0::new(&settings.database.connect_url).await) + // Get connection to source database (current DB in settings) + let source_database = current_db(&settings.database.connect_url).await; + + // Get connection to destiny database + let dest_database = new_db(&database_file).await; + + println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); + + migrate_destiny_database(dest_database.clone()).await; + reset_destiny_database(dest_database.clone()).await; + transfer_categories(source_database.clone(), dest_database.clone()).await; + transfer_user_data(source_database.clone(), dest_database.clone()).await; + transfer_tracker_keys(source_database.clone(), dest_database.clone()).await; + transfer_torrents(source_database.clone(), dest_database.clone(), &upload_path).await; + + // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. 
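
One way to satisfy the `// TODO: get from command arguments` note above, as a sketch only: positional arguments read with `std::env::args`, falling back to the defaults hard-coded in the patch. The argument order shown is an assumption, not the project's final interface.

```rust
use std::env;

fn main() {
    // Hypothetical invocation: cargo run --bin upgrade <database-file> <upload-path>
    let mut args = env::args().skip(1);

    // Fall back to the defaults hard-coded in the upgrader above.
    let database_file = args.next().unwrap_or_else(|| "data_v2.db".to_string());
    let upload_path = args.next().unwrap_or_else(|| "./uploads".to_string());

    println!("target db: {database_file}, torrent dir: {upload_path}");
}
```
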
} -async fn current_db() -> Arc { +async fn current_db(connect_url: &str) -> Arc { + Arc::new(SqliteDatabaseV1_0_0::new(connect_url).await) +} -async fn new_db(db_filename: String) -> Arc { +async fn new_db(db_filename: &str) -> Arc { let dest_database_connect_url = format!("sqlite://{}?mode=rwc", db_filename); Arc::new(SqliteDatabaseV2_0_0::new(&dest_database_connect_url).await) } @@ -170,6 +194,11 @@ async fn transfer_user_data( } } +fn today_iso8601() -> String { + let dt: DateTime = SystemTime::now().into(); + format!("{}", dt.format("%Y-%m-%d %H:%M:%S")) +} + async fn transfer_tracker_keys( source_database: Arc, dest_database: Arc, @@ -212,18 +241,116 @@ async fn transfer_tracker_keys( } } -pub async fn upgrade() { - // Get connections to source adn destiny databases - let source_database = current_db().await; - let dest_database = new_db("data_v2.db".to_string()).await; +async fn transfer_torrents( + source_database: Arc, + dest_database: Arc, + upload_path: &str, +) { + println!("Transferring torrents ..."); - println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); + // Transfer table `torrust_torrents_files` - migrate_destiny_database(dest_database.clone()).await; - reset_destiny_database(dest_database.clone()).await; - transfer_categories(source_database.clone(), dest_database.clone()).await; - transfer_user_data(source_database.clone(), dest_database.clone()).await; - transfer_tracker_keys(source_database.clone(), dest_database.clone()).await; + // Although the table `torrust_torrents_files` existed in version v1.0.0 + // it was not used. - // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. + // Transfer table `torrust_torrents` + + let torrents = source_database.get_torrents().await.unwrap(); + + for torrent in &torrents { + // [v2] table torrust_torrents + + println!( + "[v2][torrust_torrents] adding the torrent: {:?} ...", + &torrent.torrent_id + ); + + // TODO: confirm with @WarmBeer that + // - All torrents were public in version v1.0.0 + // - Infohashes were in lowercase en v1.0. and uppercase in version v2.0.0 + let private = false; + + let uploader = source_database + .get_user_by_username(&torrent.uploader) + .await + .unwrap(); + + if uploader.username != torrent.uploader { + panic!( + "Error copying torrent {:?}. 
Uploader in torrent does not match the username in the source DB", + &torrent.torrent_id + ); + } + + let filepath = format!("{}/{}.torrent", upload_path, &torrent.torrent_id); + + let torrent_from_file = read_torrent_from_file(&filepath).unwrap(); + + let pieces = torrent_from_file.info.get_pieces_as_string(); + let root_hash = torrent_from_file.info.get_root_hash_as_i64(); + + let id = dest_database + .insert_torrent( + torrent.torrent_id, + uploader.user_id, + torrent.category_id, + &torrent_from_file.info_hash(), + torrent.file_size, + &torrent_from_file.info.name, + &pieces, + torrent_from_file.info.piece_length, + private, + root_hash, + &convert_timestamp_to_datetime(torrent.upload_date), + ) + .await + .unwrap(); + + if id != torrent.torrent_id { + panic!( + "Error copying torrent {:?} from source DB to destiny DB", + &torrent.torrent_id + ); + } + + println!( + "[v2][torrust_torrents] torrent: {:?} added.", + &torrent.torrent_id + ); + + // [v2] table torrust_torrent_files + + // TODO + + // [v2] table torrust_torrent_announce_urls + + // TODO + + // [v2] table torrust_torrent_info + + // TODO + } +} + +fn read_torrent_from_file(path: &str) -> Result> { + let contents = match fs::read(path) { + Ok(contents) => contents, + Err(e) => return Err(e.into()), + }; + + match decode_torrent(&contents) { + Ok(torrent) => Ok(torrent), + Err(e) => Err(e), + } +} + +fn convert_timestamp_to_datetime(timestamp: i64) -> String { + // The expected format in database is: 2022-11-04 09:53:57 + // MySQL uses a DATETIME column and SQLite uses a TEXT column. + + let naive_datetime = NaiveDateTime::from_timestamp(timestamp, 0); + let datetime_again: DateTime = DateTime::from_utc(naive_datetime, Utc); + + // Format without timezone + datetime_again.format("%Y-%m-%d %H:%M:%S").to_string() } From 03e4befa186c6e0cb6b541385f344b0e3eb059e5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 7 Nov 2022 16:35:07 +0000 Subject: [PATCH 15/53] feat: [#56] remove unused scripts and write basic upgrade guide --- upgrades/from_v1_0_0_to_v2_0_0/README.md | 143 +++--------------- .../docker/start_mysql.sh | 10 -- .../docker/start_mysql_client.sh | 3 - .../docker/stop_mysql.sh | 3 - 4 files changed, 22 insertions(+), 137 deletions(-) delete mode 100755 upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql.sh delete mode 100755 upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql_client.sh delete mode 100755 upgrades/from_v1_0_0_to_v2_0_0/docker/stop_mysql.sh diff --git a/upgrades/from_v1_0_0_to_v2_0_0/README.md b/upgrades/from_v1_0_0_to_v2_0_0/README.md index af9a9b69..ab04a8b4 100644 --- a/upgrades/from_v1_0_0_to_v2_0_0/README.md +++ b/upgrades/from_v1_0_0_to_v2_0_0/README.md @@ -1,133 +1,34 @@ -# DB migration +# Upgrade from v1.0.0 to v2.0.0 -With the console command `cargo run --bin upgrade` you can migrate data from `v1.0.0` to `v2.0.0`. This migration includes: +## How-to -- Changing the DB schema. -- Transferring the torrent files in the dir `uploads` to the database. +To upgrade from version `v1.0.0` to `v2.0.0` you have to follow these steps: -## SQLite3 +- Back up your current database and the `uploads` folder. You can find which database and upload folder you are using in the `Config.toml` file in the root folder of your installation. +- Set up a local environment exactly as you have it in production with your production data (DB and torrents folder). +- Run the application locally with: `cargo run`. 
+- Execute the upgrader command: `cargo run --bin upgrade` +- A new SQLite file should have been created in the root folder: `data_v2.db` +- Stop the running application and change the DB configuration to use the newly generated configuration: -TODO - -## MySQL8 - -Please, - -> WARNING: MySQL migration is not implemented yet. We also provide docker infrastructure to run mysql during implementation of a migration tool. - -and also: - -> WARNING: We are not using a persisted volume. If you remove the volume used by the container you lose the database data. - -Run the docker container and connect using the console client: - -```s -./upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql.sh -./upgrades/from_v1_0_0_to_v2_0_0/docker/mysql_client.sh -``` - -Once you are connected to the client you can create databases with: - -```s -create database torrust_v1; -create database torrust_v2; -``` - -After creating databases you should see something like this: - -```s -mysql> show databases; -+--------------------+ -| Database | -+--------------------+ -| information_schema | -| mysql | -| performance_schema | -| sys | -| torrust_v1 | -| torrust_v2 | -+--------------------+ -6 rows in set (0.001 sec) -``` - -How to connect from outside the container: - -```s -mysql -h127.0.0.1 -uroot -pdb-root-password -``` - -## Create DB for backend `v2.0.0` - -You need to create an empty new database for v2.0.0. - -You need to change the configuration in `config.toml` file to use MySQL: - -```yml +```toml [database] -connect_url = "mysql://root:db-root-password@127.0.0.1/torrust_v2" -``` - -After running the backend with `cargo run` you should see the tables created by migrations: - -```s -mysql> show tables; -+-------------------------------+ -| Tables_in_torrust_v2 | -+-------------------------------+ -| _sqlx_migrations | -| torrust_categories | -| torrust_torrent_announce_urls | -| torrust_torrent_files | -| torrust_torrent_info | -| torrust_torrent_tracker_stats | -| torrust_torrents | -| torrust_tracker_keys | -| torrust_user_authentication | -| torrust_user_bans | -| torrust_user_invitation_uses | -| torrust_user_invitations | -| torrust_user_profiles | -| torrust_user_public_keys | -| torrust_users | -+-------------------------------+ -15 rows in set (0.001 sec) -``` - -### Create DB for backend `v1.0.0` - -The `upgrade` command is going to import data from version `v1.0.0` (database and `uploads` folder) into the new empty database for `v2.0.0`. - -You can import data into the source database for testing with the `mysql` DB client or docker. - -Using `mysql` client: - -```s -mysql -h127.0.0.1 -uroot -pdb-root-password torrust_v1 < ./upgrades/from_v1_0_0_to_v2_0_0/db_schemas/db_migrations_v1_for_mysql_8.sql +connect_url = "sqlite://data_v2.db?mode=rwc" ``` -Using dockerized `mysql` client: +- Run the application again. +- Perform some tests. +- If all tests pass, stop the production service, replace the DB, and start it again. -```s -docker exec -i torrust-index-backend-mysql mysql torrust_v1 -uroot -pdb-root-password < ./upgrades/from_v1_0_0_to_v2_0_0/db_schemas/db_migrations_v1_for_mysql_8.sql -``` +## Tests -### Commands +Before replacing the DB in production you can make some tests like: -Connect to `mysql` client: - -```s -mysql -h127.0.0.1 -uroot -pdb-root-password torrust_v1 -``` +- Try to log in with a preexisting user. If you do not know any you can create a new "test" user in production before starting with the upgrade process. Users had a different hash algorithm for the password in v1. 
+- Try to create a new user. +- Try to upload and download a new torrent containing a single file. +- Try to upload and download a new torrent containing a folder. -Connect to dockerized `mysql` client: +## Notes -```s -docker exec -it torrust-index-backend-mysql mysql torrust_v1 -uroot -pdb-root-password -``` - -Backup DB: - -```s -mysqldump -h127.0.0.1 torrust_v1 -uroot -pdb-root-password > ./upgrades/from_v1_0_0_to_v2_0_0/db_schemas/v1_schema_dump.sql -mysqldump -h127.0.0.1 torrust_v2 -uroot -pdb-root-password > ./upgrades/from_v1_0_0_to_v2_0_0/db_schemas/v2_schema_dump.sql -``` +The `db_schemas` contains the snapshots of the source and destiny databases for this upgrade. diff --git a/upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql.sh b/upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql.sh deleted file mode 100755 index 5a245d32..00000000 --- a/upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -docker run \ - --detach \ - --name torrust-index-backend-mysql \ - --env MYSQL_USER=db-user \ - --env MYSQL_PASSWORD=db-passwrod \ - --env MYSQL_ROOT_PASSWORD=db-root-password \ - -p 3306:3306 \ - mysql:8.0.30 # This version is used in tests diff --git a/upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql_client.sh b/upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql_client.sh deleted file mode 100755 index fed2a877..00000000 --- a/upgrades/from_v1_0_0_to_v2_0_0/docker/start_mysql_client.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker exec -it torrust-index-backend-mysql mysql -uroot -pdb-root-password diff --git a/upgrades/from_v1_0_0_to_v2_0_0/docker/stop_mysql.sh b/upgrades/from_v1_0_0_to_v2_0_0/docker/stop_mysql.sh deleted file mode 100755 index 19d7a786..00000000 --- a/upgrades/from_v1_0_0_to_v2_0_0/docker/stop_mysql.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker stop torrust-index-backend-mysql From 3fea6ea7819ee0dbed3095dc268b7bd3c09578eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Nov 2022 12:40:34 +0000 Subject: [PATCH 16/53] feat: [#56] trasnfer torrents (2/4 tables) from v1.0.0 to v2.0.0 --- .../databases/sqlite_v2_0_0.rs | 40 +++++++++++++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 49 +++++++++++++++++++ upgrades/from_v1_0_0_to_v2_0_0/README.md | 2 +- 3 files changed, 90 insertions(+), 1 deletion(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 04c04216..8ce447b2 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -3,6 +3,9 @@ use sqlx::sqlite::{SqlitePoolOptions, SqliteQueryResult}; use sqlx::{query, query_as, SqlitePool}; use crate::databases::database::DatabaseError; +use crate::models::torrent_file::TorrentFile; + +use super::sqlite_v1_0_0::Torrent; #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] pub struct Category { @@ -178,6 +181,43 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } + pub async fn insert_torrent_file_for_torrent_with_one_file( + &self, + torrent_id: i64, + md5sum: &Option, + length: i64, + ) -> Result { + query( + " + INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH) + VALUES (?, ?, ?)", + ) + .bind(md5sum) + .bind(torrent_id) + .bind(length) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn insert_torrent_file_for_torrent_with_multiple_files( + &self, + torrent: &Torrent, + file: &TorrentFile, + ) -> Result { + query( + 
"INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH, PATH) + VALUES (?, ?, ?, ?)", + ) + .bind(file.md5sum.clone()) + .bind(torrent.torrent_id) + .bind(file.length) + .bind(file.path.join("/")) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { query("DELETE FROM torrust_categories;") .execute(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index b6f23e76..71fa762b 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -322,6 +322,55 @@ async fn transfer_torrents( // TODO + println!("[v2][torrust_torrent_files] adding torrent files"); + + let _is_torrent_with_multiple_files = torrent_from_file.info.files.is_some(); + let is_torrent_with_a_single_file = torrent_from_file.info.length.is_some(); + + if is_torrent_with_a_single_file { + // Only one file is being shared: + // - "path" is NULL + // - "md5sum" can be NULL + + println!( + "[v2][torrust_torrent_files][one] adding torrent file {:?} with length {:?} ...", + &torrent_from_file.info.name, &torrent_from_file.info.length, + ); + + let file_id = dest_database + .insert_torrent_file_for_torrent_with_one_file( + torrent.torrent_id, + // TODO: it seems med5sum can be None. Why? When? + &torrent_from_file.info.md5sum.clone(), + torrent_from_file.info.length.unwrap(), + ) + .await; + + println!( + "[v2][torrust_torrent_files][one] torrent file insert result: {:?}", + &file_id + ); + } else { + // Multiple files are being shared + let files = torrent_from_file.info.files.as_ref().unwrap(); + + for file in files.iter() { + println!( + "[v2][torrust_torrent_files][multiple] adding torrent file: {:?} ...", + &file + ); + + let file_id = dest_database + .insert_torrent_file_for_torrent_with_multiple_files(torrent, file) + .await; + + println!( + "[v2][torrust_torrent_files][multiple] torrent file insert result: {:?}", + &file_id + ); + } + } + // [v2] table torrust_torrent_announce_urls // TODO diff --git a/upgrades/from_v1_0_0_to_v2_0_0/README.md b/upgrades/from_v1_0_0_to_v2_0_0/README.md index ab04a8b4..e635f8a1 100644 --- a/upgrades/from_v1_0_0_to_v2_0_0/README.md +++ b/upgrades/from_v1_0_0_to_v2_0_0/README.md @@ -26,7 +26,7 @@ Before replacing the DB in production you can make some tests like: - Try to log in with a preexisting user. If you do not know any you can create a new "test" user in production before starting with the upgrade process. Users had a different hash algorithm for the password in v1. - Try to create a new user. -- Try to upload and download a new torrent containing a single file. +- Try to upload and download a new torrent containing a single file (with and without md5sum). - Try to upload and download a new torrent containing a folder. 
## Notes From 8bdf32ffb219cb258b3fdae4ed181a9219b9416f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Nov 2022 12:48:13 +0000 Subject: [PATCH 17/53] feat: [#56] trasnfer torrents (3/4 tables) from v1.0.0 to v2.0.0 --- .../databases/sqlite_v2_0_0.rs | 13 +++++++++++++ src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs | 14 +++++++++++++- 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 8ce447b2..3f1b3ade 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -218,6 +218,19 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } + pub async fn insert_torrent_info(&self, torrent: &Torrent) -> Result { + query( + "INSERT INTO torrust_torrent_info (torrent_id, title, description) + VALUES (?, ?, ?)", + ) + .bind(torrent.torrent_id) + .bind(torrent.title.clone()) + .bind(torrent.description.clone()) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { query("DELETE FROM torrust_categories;") .execute(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 71fa762b..45f681db 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -377,7 +377,19 @@ async fn transfer_torrents( // [v2] table torrust_torrent_info - // TODO + println!( + "[v2][torrust_torrent_info] adding the torrent info for torrent {:?} ...", + &torrent.torrent_id + ); + + let id = dest_database.insert_torrent_info(torrent).await; + + println!( + "[v2][torrust_torrents] torrent info insert result: {:?}.", + &id + ); + + println!("Torrents transferred"); } } From 21174d4c746a1564ab1e3c9cdd59628648706bef Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Nov 2022 13:56:41 +0000 Subject: [PATCH 18/53] feat: [#56] trasnfer torrents (4/4 tables) from v1.0.0 to v2.0.0 --- .../databases/sqlite_v2_0_0.rs | 13 +++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 53 +++++++++++++++++-- 2 files changed, 61 insertions(+), 5 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 3f1b3ade..836ed864 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -231,6 +231,19 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } + pub async fn insert_torrent_announce_url( + &self, + torrent_id: i64, + tracker_url: &str, + ) -> Result { + query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") + .bind(torrent_id) + .bind(tracker_url) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { query("DELETE FROM torrust_categories;") .execute(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 45f681db..9f783bb7 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -268,6 +268,8 @@ async fn transfer_torrents( // TODO: confirm with @WarmBeer that // - All torrents were public in version v1.0.0 // - Infohashes were in lowercase en v1.0. 
and uppercase in version v2.0.0 + // - Only one option is used for the announce url if we have both the announce and the announce list. + // And announce has priority over the announce list. let private = false; let uploader = source_database @@ -371,10 +373,6 @@ async fn transfer_torrents( } } - // [v2] table torrust_torrent_announce_urls - - // TODO - // [v2] table torrust_torrent_info println!( @@ -389,8 +387,53 @@ async fn transfer_torrents( &id ); - println!("Torrents transferred"); + // [v2] table torrust_torrent_announce_urls + + println!( + "[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent {:?} ...", + &torrent.torrent_id + ); + + if torrent_from_file.announce.is_some() { + println!("[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent {:?} ...", &torrent.torrent_id); + + let announce_url_id = dest_database + .insert_torrent_announce_url( + torrent.torrent_id, + &torrent_from_file.announce.unwrap(), + ) + .await; + + println!( + "[v2][torrust_torrent_announce_urls] torrent announce url insert result {:?} ...", + &announce_url_id + ); + } else if torrent_from_file.announce_list.is_some() { + // BEP-0012. Multiple trackers. + + println!("[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent {:?} ...", &torrent.torrent_id); + + // flatten the nested vec (this will however remove the tier grouping of the announce list) + let announce_urls = torrent_from_file + .announce_list + .clone() + .unwrap() + .into_iter() + .flatten() + .collect::>(); + + for tracker_url in announce_urls.iter() { + println!("[v2][torrust_torrent_announce_urls] adding the torrent announce url (from announce list) for torrent {:?} ...", &torrent.torrent_id); + + let announce_url_id = dest_database + .insert_torrent_announce_url(torrent.torrent_id, tracker_url) + .await; + + println!("[v2][torrust_torrent_announce_urls] torrent announce url insert result {:?} ...", &announce_url_id); + } + } } + println!("Torrents transferred"); } From 99edf5257ee56d4212c4bed7978a3d56dba29af2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 10:17:04 +0000 Subject: [PATCH 19/53] feat: imported users have importation date instead of registration date - Now registration date is optional (we allow NULL) for imported users. - And imported users have an importation date, which is NULL for users registered through the standard registration process. 
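
The flattening in patch 18 gives up the BEP 0012 tier structure, which groups trackers by priority. The loss is easy to see in miniature; a self-contained sketch with invented tracker URLs:

```rust
fn main() {
    // BEP 0012: `announce-list` is a list of tiers, each a list of tracker URLs.
    let announce_list: Vec<Vec<String>> = vec![
        vec!["udp://tracker-a.example/announce".into()],
        vec![
            "udp://tracker-b.example/announce".into(),
            "udp://tracker-c.example/announce".into(),
        ],
    ];

    let flattened: Vec<String> = announce_list.into_iter().flatten().collect();

    // Three rows end up in torrust_torrent_announce_urls; which tier each
    // URL belonged to is no longer recoverable.
    for url in &flattened {
        println!("{url}");
    }
}
```
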
--- ...092556_torrust_user_date_registered_allow_null.sql | 1 + .../20221109095718_torrust_user_add_date_imported.sql | 1 + ...092556_torrust_user_date_registered_allow_null.sql | 11 +++++++++++ .../20221109095718_torrust_user_add_date_imported.sql | 1 + src/models/user.rs | 6 ++++-- .../from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs | 8 ++++---- src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs | 7 ++++--- 7 files changed, 26 insertions(+), 9 deletions(-) create mode 100644 migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql create mode 100644 migrations/mysql/20221109095718_torrust_user_add_date_imported.sql create mode 100644 migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql create mode 100644 migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql diff --git a/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql b/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql new file mode 100644 index 00000000..9f936f8a --- /dev/null +++ b/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users CHANGE date_registered date_registered DATETIME DEFAULT NULL \ No newline at end of file diff --git a/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql b/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql new file mode 100644 index 00000000..352a5e8f --- /dev/null +++ b/migrations/mysql/20221109095718_torrust_user_add_date_imported.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users ADD COLUMN date_imported DATETIME DEFAULT NULL \ No newline at end of file diff --git a/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql b/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql new file mode 100644 index 00000000..5757849c --- /dev/null +++ b/migrations/sqlite3/20221109092556_torrust_user_date_registered_allow_null.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS torrust_users_new ( + user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + date_registered TEXT DEFAULT NULL, + administrator BOOL NOT NULL DEFAULT FALSE ); + +INSERT INTO torrust_users_new SELECT * FROM torrust_users; + +DROP TABLE torrust_users; + +ALTER TABLE torrust_users_new RENAME TO torrust_users \ No newline at end of file diff --git a/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql b/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql new file mode 100644 index 00000000..96dddd2f --- /dev/null +++ b/migrations/sqlite3/20221109095718_torrust_user_add_date_imported.sql @@ -0,0 +1 @@ +ALTER TABLE torrust_users ADD COLUMN date_imported TEXT DEFAULT NULL \ No newline at end of file diff --git a/src/models/user.rs b/src/models/user.rs index fdf86f76..f1418f3a 100644 --- a/src/models/user.rs +++ b/src/models/user.rs @@ -3,7 +3,8 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct User { pub user_id: i64, - pub date_registered: String, + pub date_registered: Option, + pub date_imported: Option, pub administrator: bool, } @@ -33,7 +34,8 @@ pub struct UserCompact { #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct UserFull { pub user_id: i64, - pub date_registered: String, + pub date_registered: Option, + pub date_imported: Option, pub administrator: bool, pub username: String, pub email: String, diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs 
b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 836ed864..41d9327a 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -67,17 +67,17 @@ impl SqliteDatabaseV2_0_0 { }) } - pub async fn insert_user( + pub async fn insert_imported_user( &self, user_id: i64, - date_registered: &str, + date_imported: &str, administrator: bool, ) -> Result { query( - "INSERT INTO torrust_users (user_id, date_registered, administrator) VALUES (?, ?, ?)", + "INSERT INTO torrust_users (user_id, date_imported, administrator) VALUES (?, ?, ?)", ) .bind(user_id) - .bind(date_registered) + .bind(date_imported) .bind(administrator) .execute(&self.pool) .await diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 9f783bb7..0c3d99fd 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -3,7 +3,8 @@ //! NOTES for `torrust_users` table transfer: //! //! - In v2, the table `torrust_user` contains a field `date_registered` non existing in v1. -//! It's used the day when the upgrade command is executed. +//! We changed that columns to allow NULL. WE also added the new column `date_imported` with +//! the datetime when the upgrader was executed. //! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. //! Empty string is used as default value. @@ -129,10 +130,10 @@ async fn transfer_user_data( &user.user_id, &user.username ); - let default_data_registered = today_iso8601(); + let date_imported = today_iso8601(); let id = dest_database - .insert_user(user.user_id, &default_data_registered, user.administrator) + .insert_imported_user(user.user_id, &date_imported, user.administrator) .await .unwrap(); From 715265490dc118c27f8ffeb2c1598efd92de5c98 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 10:46:44 +0000 Subject: [PATCH 20/53] refactor: [#56] rename structs for DB records Add the "Record" suffix to structs representing DB records. In order to avoid mixing them up with models. 
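
The rename in this patch separates two roles that one name was covering. A miniature of the convention, with illustrative types rather than the project's real fields:

```rust
// A `*Record` mirrors a DB row one-to-one; the domain model can differ.
struct CategoryRecord {
    category_id: i64,
    name: String,
}

struct Category {
    id: i64,
    name: String,
    num_torrents: i64, // derived value, not a column of torrust_categories
}

fn to_model(r: CategoryRecord, num_torrents: i64) -> Category {
    Category { id: r.category_id, name: r.name, num_torrents }
}

fn main() {
    let row = CategoryRecord { category_id: 1, name: "movies".into() };
    let model = to_model(row, 0);
    println!("{} (id {}), {} torrents", model.name, model.id, model.num_torrents);
}
```

Keeping the suffix on the row-shaped structs makes it visible at a glance which side of the query boundary a value belongs to.
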
--- .../databases/sqlite_v1_0_0.rs | 34 +++++++++---------- .../databases/sqlite_v2_0_0.rs | 28 +++++++-------- 2 files changed, 30 insertions(+), 32 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs index 3f784db1..d763be6b 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -5,13 +5,13 @@ use sqlx::{query_as, SqlitePool}; use crate::databases::database::DatabaseError; #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct Category { +pub struct CategoryRecord { pub category_id: i64, pub name: String, } #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct User { +pub struct UserRecord { pub user_id: i64, pub username: String, pub email: String, @@ -21,7 +21,7 @@ pub struct User { } #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct TrackerKey { +pub struct TrackerKeyRecord { pub key_id: i64, pub user_id: i64, pub key: String, @@ -29,7 +29,7 @@ pub struct TrackerKey { } #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct Torrent { +pub struct TorrentRecord { pub torrent_id: i64, pub uploader: String, pub info_hash: String, @@ -43,7 +43,7 @@ pub struct Torrent { } #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct TorrentFile { +pub struct TorrentFileRecord { pub file_id: i64, pub torrent_uid: i64, pub number: i64, @@ -64,8 +64,8 @@ impl SqliteDatabaseV1_0_0 { Self { pool: db } } - pub async fn get_categories_order_by_id(&self) -> Result, DatabaseError> { - query_as::<_, Category>( + pub async fn get_categories_order_by_id(&self) -> Result, DatabaseError> { + query_as::<_, CategoryRecord>( "SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC", ) .fetch_all(&self.pool) @@ -73,33 +73,33 @@ impl SqliteDatabaseV1_0_0 { .map_err(|_| DatabaseError::Error) } - pub async fn get_users(&self) -> Result, sqlx::Error> { - query_as::<_, User>("SELECT * FROM torrust_users ORDER BY user_id ASC") + pub async fn get_users(&self) -> Result, sqlx::Error> { + query_as::<_, UserRecord>("SELECT * FROM torrust_users ORDER BY user_id ASC") .fetch_all(&self.pool) .await } - pub async fn get_user_by_username(&self, username: &str) -> Result { - query_as::<_, User>("SELECT * FROM torrust_users WHERE username = ?") + pub async fn get_user_by_username(&self, username: &str) -> Result { + query_as::<_, UserRecord>("SELECT * FROM torrust_users WHERE username = ?") .bind(username) .fetch_one(&self.pool) .await } - pub async fn get_tracker_keys(&self) -> Result, sqlx::Error> { - query_as::<_, TrackerKey>("SELECT * FROM torrust_tracker_keys ORDER BY key_id ASC") + pub async fn get_tracker_keys(&self) -> Result, sqlx::Error> { + query_as::<_, TrackerKeyRecord>("SELECT * FROM torrust_tracker_keys ORDER BY key_id ASC") .fetch_all(&self.pool) .await } - pub async fn get_torrents(&self) -> Result, sqlx::Error> { - query_as::<_, Torrent>("SELECT * FROM torrust_torrents ORDER BY torrent_id ASC") + pub async fn get_torrents(&self) -> Result, sqlx::Error> { + query_as::<_, TorrentRecord>("SELECT * FROM torrust_torrents ORDER BY torrent_id ASC") .fetch_all(&self.pool) .await } - pub async fn get_torrent_files(&self) -> Result, sqlx::Error> { - query_as::<_, TorrentFile>("SELECT * FROM torrust_torrent_files ORDER BY file_id ASC") + pub async fn get_torrent_files(&self) -> Result, sqlx::Error> { + query_as::<_, TorrentFileRecord>("SELECT * FROM 
torrust_torrent_files ORDER BY file_id ASC") .fetch_all(&self.pool) .await } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 41d9327a..186bb712 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -5,10 +5,10 @@ use sqlx::{query, query_as, SqlitePool}; use crate::databases::database::DatabaseError; use crate::models::torrent_file::TorrentFile; -use super::sqlite_v1_0_0::Torrent; +use super::sqlite_v1_0_0::TorrentRecord; #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct Category { +pub struct CategoryRecord { pub category_id: i64, pub name: String, } @@ -39,8 +39,8 @@ impl SqliteDatabaseV2_0_0 { .map_err(|_| DatabaseError::Error) } - pub async fn get_categories(&self) -> Result, DatabaseError> { - query_as::<_, Category>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name") + pub async fn get_categories(&self) -> Result, DatabaseError> { + query_as::<_, CategoryRecord>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name") .fetch_all(&self.pool) .await .map_err(|_| DatabaseError::Error) @@ -73,15 +73,13 @@ impl SqliteDatabaseV2_0_0 { date_imported: &str, administrator: bool, ) -> Result { - query( - "INSERT INTO torrust_users (user_id, date_imported, administrator) VALUES (?, ?, ?)", - ) - .bind(user_id) - .bind(date_imported) - .bind(administrator) - .execute(&self.pool) - .await - .map(|v| v.last_insert_rowid()) + query("INSERT INTO torrust_users (user_id, date_imported, administrator) VALUES (?, ?, ?)") + .bind(user_id) + .bind(date_imported) + .bind(administrator) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) } pub async fn insert_user_profile( @@ -202,7 +200,7 @@ impl SqliteDatabaseV2_0_0 { pub async fn insert_torrent_file_for_torrent_with_multiple_files( &self, - torrent: &Torrent, + torrent: &TorrentRecord, file: &TorrentFile, ) -> Result { query( @@ -218,7 +216,7 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } - pub async fn insert_torrent_info(&self, torrent: &Torrent) -> Result { + pub async fn insert_torrent_info(&self, torrent: &TorrentRecord) -> Result { query( "INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, ?)", From b9bf405d9793ed792f9960925e7ead4872858b70 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 11:03:24 +0000 Subject: [PATCH 21/53] feat: [#56] improve command output --- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 68 +++++++++---------- 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 0c3d99fd..4f96fb2f 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -5,6 +5,9 @@ //! - In v2, the table `torrust_user` contains a field `date_registered` non existing in v1. //! We changed that columns to allow NULL. WE also added the new column `date_imported` with //! the datetime when the upgrader was executed. +//! +//! NOTES for `torrust_user_profiles` table transfer: +//! //! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. //! 
Empty string is used as default value. @@ -50,8 +53,6 @@ pub async fn upgrade() { transfer_user_data(source_database.clone(), dest_database.clone()).await; transfer_tracker_keys(source_database.clone(), dest_database.clone()).await; transfer_torrents(source_database.clone(), dest_database.clone(), &upload_path).await; - - // TODO: WIP. We have to transfer data from the 5 tables in V1 and the torrent files in folder `uploads`. } async fn current_db(connect_url: &str) -> Arc { @@ -86,12 +87,12 @@ async fn transfer_categories( println!("[v1] categories: {:?}", &source_categories); let result = dest_database.reset_categories_sequence().await.unwrap(); - println!("result {:?}", result); + println!("[v2] reset categories sequence result {:?}", result); for cat in &source_categories { println!( - "[v2] adding category: {:?} {:?} ...", - &cat.category_id, &cat.name + "[v2] adding category {:?} with id {:?} ...", + &cat.name, &cat.category_id ); let id = dest_database .insert_category_and_get_id(&cat.name) @@ -126,8 +127,8 @@ async fn transfer_user_data( // [v2] table torrust_users println!( - "[v2][torrust_users] adding user: {:?} {:?} ...", - &user.user_id, &user.username + "[v2][torrust_users] adding user with username {:?} and id {:?} ...", + &user.username, &user.user_id ); let date_imported = today_iso8601(); @@ -152,8 +153,8 @@ async fn transfer_user_data( // [v2] table torrust_user_profiles println!( - "[v2][torrust_user_profiles] adding user: {:?} {:?} ...", - &user.user_id, &user.username + "[v2][torrust_user_profiles] adding user profile for user with username {:?} and id {:?} ...", + &user.username, &user.user_id ); let default_user_bio = "".to_string(); @@ -172,14 +173,14 @@ async fn transfer_user_data( .unwrap(); println!( - "[v2][torrust_user_profiles] user: {:?} {:?} added.", - &user.user_id, &user.username + "[v2][torrust_user_profiles] user profile added for user with username {:?} and id {:?}.", + &user.username, &user.user_id ); // [v2] table torrust_user_authentication println!( - "[v2][torrust_user_authentication] adding password hash ({:?}) for user ({:?}) ...", + "[v2][torrust_user_authentication] adding password hash ({:?}) for user id ({:?}) ...", &user.password, &user.user_id ); @@ -189,7 +190,7 @@ async fn transfer_user_data( .unwrap(); println!( - "[v2][torrust_user_authentication] password hash ({:?}) added for user ({:?}).", + "[v2][torrust_user_authentication] password hash ({:?}) added for user id ({:?}).", &user.password, &user.user_id ); } @@ -214,7 +215,7 @@ async fn transfer_tracker_keys( // [v2] table torrust_tracker_keys println!( - "[v2][torrust_users] adding the tracker key: {:?} ...", + "[v2][torrust_users] adding the tracker key with id {:?} ...", &tracker_key.key_id ); @@ -236,7 +237,7 @@ async fn transfer_tracker_keys( } println!( - "[v2][torrust_tracker_keys] tracker key: {:?} added.", + "[v2][torrust_tracker_keys] tracker key with id {:?} added.", &tracker_key.key_id ); } @@ -266,11 +267,7 @@ async fn transfer_torrents( &torrent.torrent_id ); - // TODO: confirm with @WarmBeer that - // - All torrents were public in version v1.0.0 - // - Infohashes were in lowercase en v1.0. and uppercase in version v2.0.0 - // - Only one option is used for announce url if we have two the announce and the announce list. - // And announce has priority over announce list. 
+ // All torrents were public in version v1.0.0 let private = false; let uploader = source_database @@ -280,7 +277,8 @@ async fn transfer_torrents( if uploader.username != torrent.uploader { panic!( - "Error copying torrent {:?}. Uploader in torrent does username", + "Error copying torrent with id {:?}. + Username (`uploader`) in `torrust_torrents` table does not match `username` in `torrust_users` table", &torrent.torrent_id ); } @@ -317,26 +315,24 @@ async fn transfer_torrents( } println!( - "[v2][torrust_torrents] torrent: {:?} added.", + "[v2][torrust_torrents] torrent with id {:?} added.", &torrent.torrent_id ); // [v2] table torrust_torrent_files - // TODO - println!("[v2][torrust_torrent_files] adding torrent files"); let _is_torrent_with_multiple_files = torrent_from_file.info.files.is_some(); let is_torrent_with_a_single_file = torrent_from_file.info.length.is_some(); if is_torrent_with_a_single_file { - // Only one file is being shared: + // The torrent contains only one file then: // - "path" is NULL // - "md5sum" can be NULL println!( - "[v2][torrust_torrent_files][one] adding torrent file {:?} with length {:?} ...", + "[v2][torrust_torrent_files][single-file-torrent] adding torrent file {:?} with length {:?} ...", &torrent_from_file.info.name, &torrent_from_file.info.length, ); @@ -350,7 +346,7 @@ async fn transfer_torrents( .await; println!( - "[v2][torrust_torrent_files][one] torrent file insert result: {:?}", + "[v2][torrust_torrent_files][single-file-torrent] torrent file insert result: {:?}", &file_id ); } else { @@ -359,7 +355,7 @@ async fn transfer_torrents( for file in files.iter() { println!( - "[v2][torrust_torrent_files][multiple] adding torrent file: {:?} ...", + "[v2][torrust_torrent_files][multiple-file-torrent] adding torrent file: {:?} ...", &file ); @@ -368,7 +364,7 @@ async fn transfer_torrents( .await; println!( - "[v2][torrust_torrent_files][multiple] torrent file insert result: {:?}", + "[v2][torrust_torrent_files][multiple-file-torrent] torrent file insert result: {:?}", &file_id ); } @@ -377,7 +373,7 @@ async fn transfer_torrents( // [v2] table torrust_torrent_info println!( - "[v2][torrust_torrent_info] adding the torrent info for torrent {:?} ...", + "[v2][torrust_torrent_info] adding the torrent info for torrent id {:?} ...", &torrent.torrent_id ); @@ -391,12 +387,12 @@ async fn transfer_torrents( // [v2] table torrust_torrent_announce_urls println!( - "[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent {:?} ...", + "[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id ); if torrent_from_file.announce.is_some() { - println!("[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent {:?} ...", &torrent.torrent_id); + println!("[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); let announce_url_id = dest_database .insert_torrent_announce_url( @@ -406,13 +402,13 @@ async fn transfer_torrents( .await; println!( - "[v2][torrust_torrent_announce_urls] torrent announce url insert result {:?} ...", + "[v2][torrust_torrent_announce_urls][announce] torrent announce url insert result {:?} ...", &announce_url_id ); } else if torrent_from_file.announce_list.is_some() { // BEP-0012. Multiple trackers. 
- println!("[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent {:?} ...", &torrent.torrent_id); + println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); // flatten the nested vec (this will however remove the) let announce_urls = torrent_from_file @@ -424,13 +420,13 @@ async fn transfer_torrents( .collect::>(); for tracker_url in announce_urls.iter() { - println!("[v2][torrust_torrent_announce_urls] adding the torrent announce url (from announce list) for torrent {:?} ...", &torrent.torrent_id); + println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); let announce_url_id = dest_database .insert_torrent_announce_url(torrent.torrent_id, tracker_url) .await; - println!("[v2][torrust_torrent_announce_urls] torrent announce url insert result {:?} ...", &announce_url_id); + println!("[v2][torrust_torrent_announce_urls][announce-list] torrent announce url insert result {:?} ...", &announce_url_id); } } } From 6bb4c53c7e84323fdaaca848cb9695ff7914f7ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 11:31:49 +0000 Subject: [PATCH 22/53] refactor: extract struct TorrentRecordV2 --- .../databases/sqlite_v1_0_0.rs | 34 +++--- .../databases/sqlite_v2_0_0.rs | 101 ++++++++++++------ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 36 ++----- 3 files changed, 95 insertions(+), 76 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs index d763be6b..3328fd43 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -11,7 +11,7 @@ pub struct CategoryRecord { } #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct UserRecord { +pub struct UserRecordV1 { pub user_id: i64, pub username: String, pub email: String, @@ -21,7 +21,7 @@ pub struct UserRecord { } #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct TrackerKeyRecord { +pub struct TrackerKeyRecordV1 { pub key_id: i64, pub user_id: i64, pub key: String, @@ -29,7 +29,7 @@ pub struct TrackerKeyRecord { } #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct TorrentRecord { +pub struct TorrentRecordV1 { pub torrent_id: i64, pub uploader: String, pub info_hash: String, @@ -43,7 +43,7 @@ pub struct TorrentRecord { } #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct TorrentFileRecord { +pub struct TorrentFileRecordV1 { pub file_id: i64, pub torrent_uid: i64, pub number: i64, @@ -73,34 +73,36 @@ impl SqliteDatabaseV1_0_0 { .map_err(|_| DatabaseError::Error) } - pub async fn get_users(&self) -> Result, sqlx::Error> { - query_as::<_, UserRecord>("SELECT * FROM torrust_users ORDER BY user_id ASC") + pub async fn get_users(&self) -> Result, sqlx::Error> { + query_as::<_, UserRecordV1>("SELECT * FROM torrust_users ORDER BY user_id ASC") .fetch_all(&self.pool) .await } - pub async fn get_user_by_username(&self, username: &str) -> Result { - query_as::<_, UserRecord>("SELECT * FROM torrust_users WHERE username = ?") + pub async fn get_user_by_username(&self, username: &str) -> Result { + query_as::<_, UserRecordV1>("SELECT * FROM torrust_users WHERE username = ?") .bind(username) .fetch_one(&self.pool) .await } - pub async fn get_tracker_keys(&self) -> Result, sqlx::Error> { - query_as::<_, TrackerKeyRecord>("SELECT * 
FROM torrust_tracker_keys ORDER BY key_id ASC") + pub async fn get_tracker_keys(&self) -> Result, sqlx::Error> { + query_as::<_, TrackerKeyRecordV1>("SELECT * FROM torrust_tracker_keys ORDER BY key_id ASC") .fetch_all(&self.pool) .await } - pub async fn get_torrents(&self) -> Result, sqlx::Error> { - query_as::<_, TorrentRecord>("SELECT * FROM torrust_torrents ORDER BY torrent_id ASC") + pub async fn get_torrents(&self) -> Result, sqlx::Error> { + query_as::<_, TorrentRecordV1>("SELECT * FROM torrust_torrents ORDER BY torrent_id ASC") .fetch_all(&self.pool) .await } - pub async fn get_torrent_files(&self) -> Result, sqlx::Error> { - query_as::<_, TorrentFileRecord>("SELECT * FROM torrust_torrent_files ORDER BY file_id ASC") - .fetch_all(&self.pool) - .await + pub async fn get_torrent_files(&self) -> Result, sqlx::Error> { + query_as::<_, TorrentFileRecordV1>( + "SELECT * FROM torrust_torrent_files ORDER BY file_id ASC", + ) + .fetch_all(&self.pool) + .await } } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 186bb712..3cbf4020 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -1,17 +1,67 @@ +use chrono::{DateTime, NaiveDateTime, Utc}; use serde::{Deserialize, Serialize}; use sqlx::sqlite::{SqlitePoolOptions, SqliteQueryResult}; use sqlx::{query, query_as, SqlitePool}; use crate::databases::database::DatabaseError; -use crate::models::torrent_file::TorrentFile; +use crate::models::torrent_file::{TorrentFile, TorrentInfo}; -use super::sqlite_v1_0_0::TorrentRecord; +use super::sqlite_v1_0_0::{TorrentRecordV1, UserRecordV1}; #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct CategoryRecord { +pub struct CategoryRecordV2 { pub category_id: i64, pub name: String, } + +pub struct TorrentRecordV2 { + pub torrent_id: i64, + pub uploader_id: i64, + pub category_id: i64, + pub info_hash: String, + pub size: i64, + pub name: String, + pub pieces: String, + pub piece_length: i64, + pub private: bool, + pub root_hash: i64, + pub date_uploaded: String, +} + +impl TorrentRecordV2 { + pub fn from_v1_data( + torrent: &TorrentRecordV1, + torrent_info: &TorrentInfo, + uploader: &UserRecordV1, + private: bool, + ) -> Self { + Self { + torrent_id: torrent.torrent_id, + uploader_id: uploader.user_id, + category_id: torrent.category_id, + info_hash: torrent.info_hash.clone(), + size: torrent.file_size, + name: torrent_info.name.clone(), + pieces: torrent_info.get_pieces_as_string(), + piece_length: torrent_info.piece_length, + private, + root_hash: torrent_info.get_root_hash_as_i64(), + date_uploaded: convert_timestamp_to_datetime(torrent.upload_date), + } + } +} + +fn convert_timestamp_to_datetime(timestamp: i64) -> String { + // The expected format in database is: 2022-11-04 09:53:57 + // MySQL uses a DATETIME column and SQLite uses a TEXT column. 
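+    // For example, convert_timestamp_to_datetime(1667555637) returns "2022-11-04 09:53:57" (UTC).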
+ + let naive_datetime = NaiveDateTime::from_timestamp(timestamp, 0); + let datetime_again: DateTime = DateTime::from_utc(naive_datetime, Utc); + + // Format without timezone + datetime_again.format("%Y-%m-%d %H:%M:%S").to_string() +} + pub struct SqliteDatabaseV2_0_0 { pub pool: SqlitePool, } @@ -39,8 +89,8 @@ impl SqliteDatabaseV2_0_0 { .map_err(|_| DatabaseError::Error) } - pub async fn get_categories(&self) -> Result, DatabaseError> { - query_as::<_, CategoryRecord>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name") + pub async fn get_categories(&self) -> Result, DatabaseError> { + query_as::<_, CategoryRecordV2>("SELECT tc.category_id, tc.name, COUNT(tt.category_id) as num_torrents FROM torrust_categories tc LEFT JOIN torrust_torrents tt on tc.category_id = tt.category_id GROUP BY tc.name") .fetch_all(&self.pool) .await .map_err(|_| DatabaseError::Error) @@ -133,20 +183,7 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } - pub async fn insert_torrent( - &self, - torrent_id: i64, - uploader_id: i64, - category_id: i64, - info_hash: &str, - size: i64, - name: &str, - pieces: &str, - piece_length: i64, - private: bool, - root_hash: i64, - date_uploaded: &str, - ) -> Result { + pub async fn insert_torrent(&self, torrent: &TorrentRecordV2) -> Result { query( " INSERT INTO torrust_torrents ( @@ -163,17 +200,17 @@ impl SqliteDatabaseV2_0_0 { date_uploaded ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", ) - .bind(torrent_id) - .bind(uploader_id) - .bind(category_id) - .bind(info_hash) - .bind(size) - .bind(name) - .bind(pieces) - .bind(piece_length) - .bind(private) - .bind(root_hash) - .bind(date_uploaded) + .bind(torrent.torrent_id) + .bind(torrent.uploader_id) + .bind(torrent.category_id) + .bind(torrent.info_hash.clone()) + .bind(torrent.size) + .bind(torrent.name.clone()) + .bind(torrent.pieces.clone()) + .bind(torrent.piece_length) + .bind(torrent.private) + .bind(torrent.root_hash) + .bind(torrent.date_uploaded.clone()) .execute(&self.pool) .await .map(|v| v.last_insert_rowid()) @@ -200,7 +237,7 @@ impl SqliteDatabaseV2_0_0 { pub async fn insert_torrent_file_for_torrent_with_multiple_files( &self, - torrent: &TorrentRecord, + torrent: &TorrentRecordV1, file: &TorrentFile, ) -> Result { query( @@ -216,7 +253,7 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } - pub async fn insert_torrent_info(&self, torrent: &TorrentRecord) -> Result { + pub async fn insert_torrent_info(&self, torrent: &TorrentRecordV1) -> Result { query( "INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, ?)", diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 4f96fb2f..28bdc87e 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -11,14 +11,15 @@ //! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. //! Empty string is used as default value. 
-use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{ + SqliteDatabaseV2_0_0, TorrentRecordV2, +}; use crate::utils::parse_torrent::decode_torrent; use crate::{ models::torrent_file::Torrent, upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0, }; use chrono::prelude::{DateTime, Utc}; -use chrono::NaiveDateTime; use std::{error, fs}; use std::{sync::Arc, time::SystemTime}; @@ -287,23 +288,13 @@ async fn transfer_torrents( let torrent_from_file = read_torrent_from_file(&filepath).unwrap(); - let pieces = torrent_from_file.info.get_pieces_as_string(); - let root_hash = torrent_from_file.info.get_root_hash_as_i64(); - let id = dest_database - .insert_torrent( - torrent.torrent_id, - uploader.user_id, - torrent.category_id, - &torrent_from_file.info_hash(), - torrent.file_size, - &torrent_from_file.info.name, - &pieces, - torrent_from_file.info.piece_length, + .insert_torrent(&TorrentRecordV2::from_v1_data( + torrent, + &torrent_from_file.info, + &uploader, private, - root_hash, - &convert_timestamp_to_datetime(torrent.upload_date), - ) + )) .await .unwrap(); @@ -444,14 +435,3 @@ fn read_torrent_from_file(path: &str) -> Result> Err(e) => Err(e), } } - -fn convert_timestamp_to_datetime(timestamp: i64) -> String { - // The expected format in database is: 2022-11-04 09:53:57 - // MySQL uses a DATETIME column and SQLite uses a TEXT column. - - let naive_datetime = NaiveDateTime::from_timestamp(timestamp, 0); - let datetime_again: DateTime = DateTime::from_utc(naive_datetime, Utc); - - // Format without timezone - datetime_again.format("%Y-%m-%d %H:%M:%S").to_string() -} From 72dc1398878c7e175ec460c4d85564427d5518bf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 11:36:30 +0000 Subject: [PATCH 23/53] refactor: reformat sql queries --- .../databases/sqlite_v2_0_0.rs | 59 ++++++++----------- 1 file changed, 25 insertions(+), 34 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 3cbf4020..3021b352 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -222,17 +222,13 @@ impl SqliteDatabaseV2_0_0 { md5sum: &Option, length: i64, ) -> Result { - query( - " - INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH) - VALUES (?, ?, ?)", - ) - .bind(md5sum) - .bind(torrent_id) - .bind(length) - .execute(&self.pool) - .await - .map(|v| v.last_insert_rowid()) + query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH) VALUES (?, ?, ?)") + .bind(md5sum) + .bind(torrent_id) + .bind(length) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) } pub async fn insert_torrent_file_for_torrent_with_multiple_files( @@ -240,9 +236,7 @@ impl SqliteDatabaseV2_0_0 { torrent: &TorrentRecordV1, file: &TorrentFile, ) -> Result { - query( - "INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH, PATH) - VALUES (?, ?, ?, ?)", + query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH, PATH) VALUES (?, ?, ?, ?)", ) .bind(file.md5sum.clone()) .bind(torrent.torrent_id) @@ -254,16 +248,13 @@ impl SqliteDatabaseV2_0_0 { } pub async fn insert_torrent_info(&self, torrent: &TorrentRecordV1) -> Result { - query( - "INSERT INTO torrust_torrent_info (torrent_id, title, description) - VALUES (?, ?, ?)", - ) - .bind(torrent.torrent_id) - 
.bind(torrent.title.clone())
-    .bind(torrent.description.clone())
-    .execute(&self.pool)
-    .await
-    .map(|v| v.last_insert_rowid())
+        query("INSERT INTO torrust_torrent_info (torrent_id, title, description) VALUES (?, ?, ?)")
+            .bind(torrent.torrent_id)
+            .bind(torrent.title.clone())
+            .bind(torrent.description.clone())
+            .execute(&self.pool)
+            .await
+            .map(|v| v.last_insert_rowid())
     }

     pub async fn insert_torrent_announce_url(
@@ -280,52 +271,52 @@
     }

     pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> {
-        query("DELETE FROM torrust_categories;")
+        query("DELETE FROM torrust_categories")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_torrents;")
+        query("DELETE FROM torrust_torrents")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_tracker_keys;")
+        query("DELETE FROM torrust_tracker_keys")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_users;")
+        query("DELETE FROM torrust_users")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_user_authentication;")
+        query("DELETE FROM torrust_user_authentication")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_user_bans;")
+        query("DELETE FROM torrust_user_bans")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_user_invitations;")
+        query("DELETE FROM torrust_user_invitations")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_user_profiles;")
+        query("DELETE FROM torrust_user_profiles")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_torrents;")
+        query("DELETE FROM torrust_torrents")
             .execute(&self.pool)
             .await
             .unwrap();

-        query("DELETE FROM torrust_user_public_keys;")
+        query("DELETE FROM torrust_user_public_keys")
             .execute(&self.pool)
             .await
             .unwrap();

From 309e141662837af37b6c07a4526319cfd2f70652 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Wed, 9 Nov 2022 11:53:16 +0000
Subject: [PATCH 24/53] fix: take torrent private flag from torrent file

We were assuming "private" to be always false (all torrents public) and
we have to use the value inside the torrent file.
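In short, the upgrader keeps the optional `private` key from the torrent
file and only falls back to public when the key is absent. A minimal
sketch (assuming the decoded `private` key is an `Option<i64>`, as the
`.unwrap_or(0)` in the diff suggests):

```
/// Maps the optional `private` key of a torrent file to the integer
/// value stored in the `torrust_torrents` table.
fn db_private_flag(private: Option<i64>) -> i64 {
    // 0 (public) is only a default for torrents whose metainfo
    // does not contain the `private` key at all.
    private.unwrap_or(0)
}
```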
--- .../from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs | 7 +++---- src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs | 6 +----- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 3021b352..21dc28ff 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -23,7 +23,7 @@ pub struct TorrentRecordV2 { pub name: String, pub pieces: String, pub piece_length: i64, - pub private: bool, + pub private: Option, pub root_hash: i64, pub date_uploaded: String, } @@ -33,7 +33,6 @@ impl TorrentRecordV2 { torrent: &TorrentRecordV1, torrent_info: &TorrentInfo, uploader: &UserRecordV1, - private: bool, ) -> Self { Self { torrent_id: torrent.torrent_id, @@ -44,7 +43,7 @@ impl TorrentRecordV2 { name: torrent_info.name.clone(), pieces: torrent_info.get_pieces_as_string(), piece_length: torrent_info.piece_length, - private, + private: torrent_info.private, root_hash: torrent_info.get_root_hash_as_i64(), date_uploaded: convert_timestamp_to_datetime(torrent.upload_date), } @@ -208,7 +207,7 @@ impl SqliteDatabaseV2_0_0 { .bind(torrent.name.clone()) .bind(torrent.pieces.clone()) .bind(torrent.piece_length) - .bind(torrent.private) + .bind(torrent.private.unwrap_or(0)) .bind(torrent.root_hash) .bind(torrent.date_uploaded.clone()) .execute(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 28bdc87e..304a6fcb 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -268,9 +268,6 @@ async fn transfer_torrents( &torrent.torrent_id ); - // All torrents were public in version v1.0.0 - let private = false; - let uploader = source_database .get_user_by_username(&torrent.uploader) .await @@ -292,8 +289,7 @@ async fn transfer_torrents( .insert_torrent(&TorrentRecordV2::from_v1_data( torrent, &torrent_from_file.info, - &uploader, - private, + &uploader )) .await .unwrap(); From f620e05393f0461820b32300a926341c58292779 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 12:26:05 +0000 Subject: [PATCH 25/53] fix: [#56] announce list has precedence over announce From BEP-12: "In addition to the standard "announce" key, in the main area of the metadata file and not part of the "info" section, will be a new key, "announce-list". This key will refer to a list of lists of URLs, and will contain a list of tiers of announces. If the client is compatible with the multitracker specification, and if the "announce-list" key is present, the client will ignore the "announce" key and only use the URLs in "announce-list". 
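A hypothetical helper that captures the rule (the patch below implements
it by reordering the `if`/`else if` branches instead):

```
/// Collects the tracker URLs to import. Per BEP-12, `announce` is
/// ignored whenever `announce-list` is present.
fn tracker_urls(
    announce: Option<String>,
    announce_list: Option<Vec<Vec<String>>>,
) -> Vec<String> {
    match announce_list {
        // Flatten the tiers into a single list of URLs.
        Some(tiers) => tiers.into_iter().flatten().collect(),
        // No "announce-list" key: fall back to the single "announce" URL.
        None => announce.into_iter().collect(),
    }
}
```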
--- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 304a6fcb..e9553392 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -289,7 +289,7 @@ async fn transfer_torrents( .insert_torrent(&TorrentRecordV2::from_v1_data( torrent, &torrent_from_file.info, - &uploader + &uploader, )) .await .unwrap(); @@ -378,21 +378,7 @@ async fn transfer_torrents( &torrent.torrent_id ); - if torrent_from_file.announce.is_some() { - println!("[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); - - let announce_url_id = dest_database - .insert_torrent_announce_url( - torrent.torrent_id, - &torrent_from_file.announce.unwrap(), - ) - .await; - - println!( - "[v2][torrust_torrent_announce_urls][announce] torrent announce url insert result {:?} ...", - &announce_url_id - ); - } else if torrent_from_file.announce_list.is_some() { + if torrent_from_file.announce_list.is_some() { // BEP-0012. Multiple trackers. println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); @@ -415,6 +401,20 @@ async fn transfer_torrents( println!("[v2][torrust_torrent_announce_urls][announce-list] torrent announce url insert result {:?} ...", &announce_url_id); } + } else if torrent_from_file.announce.is_some() { + println!("[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); + + let announce_url_id = dest_database + .insert_torrent_announce_url( + torrent.torrent_id, + &torrent_from_file.announce.unwrap(), + ) + .await; + + println!( + "[v2][torrust_torrent_announce_urls][announce] torrent announce url insert result {:?} ...", + &announce_url_id + ); } } println!("Torrents transferred"); From 693994fd084de726f0bcb10ada1db42978ebf99a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 12:49:57 +0000 Subject: [PATCH 26/53] feat: add new dependency text_colorizer It allows adding colors to text output in console commands. 
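For reference, a minimal usage sketch; the `.green()`, `.red()` and
`.bold()` methods come from the `colored` crate it wraps, as the
`Cargo.lock` entries below show:

```
use text_colorizer::*;

fn main() {
    // The methods wrap the text in ANSI escape codes when the output
    // is a terminal.
    eprintln!("{} wrong number of arguments", "Error".red().bold());
    println!("{} done", "Upgrader".green());
}
```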
--- Cargo.lock | 32 ++++++++++++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 33 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index e65c2f14..0246f562 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -300,6 +300,17 @@ dependencies = [ "num-traits 0.2.14", ] +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + [[package]] name = "autocfg" version = "1.0.1" @@ -444,6 +455,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "colored" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd" +dependencies = [ + "atty", + "lazy_static", + "winapi", +] + [[package]] name = "config" version = "0.11.0" @@ -2396,6 +2418,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "text-colorizer" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b30f9b94bd367aacc3f62cd28668b10c7ae1784c7d27e223a1c21646221a9166" +dependencies = [ + "colored", +] + [[package]] name = "thiserror" version = "1.0.34" @@ -2610,6 +2641,7 @@ dependencies = [ "serde_json", "sha-1 0.10.0", "sqlx", + "text-colorizer", "tokio", "toml", "urlencoding", diff --git a/Cargo.toml b/Cargo.toml index 60206325..f16bfeeb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,3 +35,4 @@ lettre = { version = "0.10.0-rc.3", features = ["builder", "tokio1", "tokio1-rus sailfish = "0.4.0" regex = "1.6.0" pbkdf2 = "0.11.0" +text-colorizer = "1.0.0" From aabc3ef6398585e92aaeb9fd1d4a3f65e2b285ae Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 12:51:14 +0000 Subject: [PATCH 27/53] feat: the upgrader command takes args Removed hardocoded arguments. Now you can use the "upgrader" with: ``` cargo run --bin upgrade ./data_v2.db ./uploads ``` Where "./data_v2.db" is the newly generated DB and "./uploads" the folder where torrent files weere stored in version v1.0.0. --- src/bin/upgrade.rs | 2 +- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 56 +++++++++++++++++-- upgrades/from_v1_0_0_to_v2_0_0/README.md | 2 +- 3 files changed, 52 insertions(+), 8 deletions(-) diff --git a/src/bin/upgrade.rs b/src/bin/upgrade.rs index 15350d1d..1c5a27a3 100644 --- a/src/bin/upgrade.rs +++ b/src/bin/upgrade.rs @@ -1,6 +1,6 @@ //! Upgrade command. //! It updates the application from version v1.0.0 to v2.0.0. -//! You can execute it with: `cargo run --bin upgrade` +//! You can execute it with: `cargo run --bin upgrade ./data_v2.db ./uploads` use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::upgrade; diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index e9553392..d084ede4 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -21,15 +21,54 @@ use crate::{ }; use chrono::prelude::{DateTime, Utc}; -use std::{error, fs}; +use std::{env, error, fs}; use std::{sync::Arc, time::SystemTime}; use crate::config::Configuration; +use text_colorizer::*; + +#[derive(Debug)] +struct Arguments { + database_file: String, // The new database + upload_path: String, // The relative dir where torrent files are stored +} + +fn print_usage() { + eprintln!( + "{} - migrates date from version v1.0.0 to v2.0.0. 
+ + cargo run --bin upgrade TARGET_SLQLITE_FILE_PATH TORRENT_UPLOAD_DIR + + For example: + + cargo run --bin upgrade ./data_v2.db ./uploads + + ", + "Upgrader".green() + ); +} + +fn parse_args() -> Arguments { + let args: Vec = env::args().skip(1).collect(); + + if args.len() != 2 { + eprintln!( + "{} wrong number of arguments: expected 2, got {}", + "Error".red().bold(), + args.len() + ); + print_usage(); + } + + Arguments { + database_file: args[0].clone(), + upload_path: args[1].clone(), + } +} + pub async fn upgrade() { - // TODO: get from command arguments - let database_file = "data_v2.db".to_string(); // The new database - let upload_path = "./uploads".to_string(); // The relative dir where torrent files are stored + let args = parse_args(); let cfg = match Configuration::load_from_file().await { Ok(config) => Arc::new(config), @@ -44,7 +83,7 @@ pub async fn upgrade() { let source_database = current_db(&settings.database.connect_url).await; // Get connection to destiny database - let dest_database = new_db(&database_file).await; + let dest_database = new_db(&args.database_file).await; println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); @@ -53,7 +92,12 @@ pub async fn upgrade() { transfer_categories(source_database.clone(), dest_database.clone()).await; transfer_user_data(source_database.clone(), dest_database.clone()).await; transfer_tracker_keys(source_database.clone(), dest_database.clone()).await; - transfer_torrents(source_database.clone(), dest_database.clone(), &upload_path).await; + transfer_torrents( + source_database.clone(), + dest_database.clone(), + &args.upload_path, + ) + .await; } async fn current_db(connect_url: &str) -> Arc { diff --git a/upgrades/from_v1_0_0_to_v2_0_0/README.md b/upgrades/from_v1_0_0_to_v2_0_0/README.md index e635f8a1..c5ca1601 100644 --- a/upgrades/from_v1_0_0_to_v2_0_0/README.md +++ b/upgrades/from_v1_0_0_to_v2_0_0/README.md @@ -7,7 +7,7 @@ To upgrade from version `v1.0.0` to `v2.0.0` you have to follow these steps: - Back up your current database and the `uploads` folder. You can find which database and upload folder are you using in the `Config.toml` file in the root folder of your installation. - Set up a local environment exactly as you have it in production with your production data (DB and torrents folder). - Run the application locally with: `cargo run`. -- Execute the upgrader command: `cargo run --bin upgrade` +- Execute the upgrader command: `cargo run --bin upgrade ./data_v2.db ./uploads` - A new SQLite file should have been created in the root folder: `data_v2.db` - Stop the running application and change the DB configuration to use the newly generated configuration: From 217fae2a6672dfdc8e1b42b6d36bb5778b6e5479 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 13:16:19 +0000 Subject: [PATCH 28/53] feat: [#56] take source DB in upgrader command from args Instead of reading the current configuration. --- src/bin/upgrade.rs | 6 +-- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 44 +++++++++---------- upgrades/from_v1_0_0_to_v2_0_0/README.md | 2 +- 3 files changed, 24 insertions(+), 28 deletions(-) diff --git a/src/bin/upgrade.rs b/src/bin/upgrade.rs index 1c5a27a3..874f0fad 100644 --- a/src/bin/upgrade.rs +++ b/src/bin/upgrade.rs @@ -1,10 +1,10 @@ //! Upgrade command. //! It updates the application from version v1.0.0 to v2.0.0. -//! You can execute it with: `cargo run --bin upgrade ./data_v2.db ./uploads` +//! 
You can execute it with: `cargo run --bin upgrade ./data.db ./data_v2.db ./uploads` -use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::upgrade; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::run_upgrader; #[actix_web::main] async fn main() { - upgrade().await; + run_upgrader().await; } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index d084ede4..6d9d5493 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -24,25 +24,26 @@ use chrono::prelude::{DateTime, Utc}; use std::{env, error, fs}; use std::{sync::Arc, time::SystemTime}; -use crate::config::Configuration; - use text_colorizer::*; +const NUMBER_OF_ARGUMENTS: i64 = 3; + #[derive(Debug)] -struct Arguments { - database_file: String, // The new database - upload_path: String, // The relative dir where torrent files are stored +pub struct Arguments { + source_database_file: String, // The source database in version v1.0.0 we want to migrate + destiny_database_file: String, // The new migrated database in version v2.0.0 + upload_path: String, // The relative dir where torrent files are stored } fn print_usage() { eprintln!( "{} - migrates date from version v1.0.0 to v2.0.0. - cargo run --bin upgrade TARGET_SLQLITE_FILE_PATH TORRENT_UPLOAD_DIR + cargo run --bin upgrade SOURCE_DB_FILE DESTINY_DB_FILE TORRENT_UPLOAD_DIR For example: - cargo run --bin upgrade ./data_v2.db ./uploads + cargo run --bin upgrade ./data.db ./data_v2.db ./uploads ", "Upgrader".green() @@ -52,38 +53,33 @@ fn print_usage() { fn parse_args() -> Arguments { let args: Vec = env::args().skip(1).collect(); - if args.len() != 2 { + if args.len() != 3 { eprintln!( - "{} wrong number of arguments: expected 2, got {}", + "{} wrong number of arguments: expected {}, got {}", "Error".red().bold(), + NUMBER_OF_ARGUMENTS, args.len() ); print_usage(); } Arguments { - database_file: args[0].clone(), - upload_path: args[1].clone(), + source_database_file: args[0].clone(), + destiny_database_file: args[1].clone(), + upload_path: args[2].clone(), } } -pub async fn upgrade() { - let args = parse_args(); - - let cfg = match Configuration::load_from_file().await { - Ok(config) => Arc::new(config), - Err(error) => { - panic!("{}", error) - } - }; - - let settings = cfg.settings.read().await; +pub async fn run_upgrader() { + upgrade(&parse_args()).await +} +pub async fn upgrade(args: &Arguments) { // Get connection to source database (current DB in settings) - let source_database = current_db(&settings.database.connect_url).await; + let source_database = current_db(&args.source_database_file).await; // Get connection to destiny database - let dest_database = new_db(&args.database_file).await; + let dest_database = new_db(&args.destiny_database_file).await; println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); diff --git a/upgrades/from_v1_0_0_to_v2_0_0/README.md b/upgrades/from_v1_0_0_to_v2_0_0/README.md index c5ca1601..cd2c1c11 100644 --- a/upgrades/from_v1_0_0_to_v2_0_0/README.md +++ b/upgrades/from_v1_0_0_to_v2_0_0/README.md @@ -7,7 +7,7 @@ To upgrade from version `v1.0.0` to `v2.0.0` you have to follow these steps: - Back up your current database and the `uploads` folder. You can find which database and upload folder are you using in the `Config.toml` file in the root folder of your installation. - Set up a local environment exactly as you have it in production with your production data (DB and torrents folder). 
- Run the application locally with: `cargo run`.
-- Execute the upgrader command: `cargo run --bin upgrade ./data_v2.db ./uploads`
+- Execute the upgrader command: `cargo run --bin upgrade ./data.db ./data_v2.db ./uploads`
 - A new SQLite file should have been created in the root folder: `data_v2.db`
 - Stop the running application and change the DB configuration to use the newly generated configuration:

From 7f0a7eaae8fc10cb4179a1c3269065ba693d6fa3 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Wed, 9 Nov 2022 13:48:22 +0000
Subject: [PATCH 29/53] fix: open source db in read-only mode in upgrader

---
 src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
index 6d9d5493..55a8821d 100644
--- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
@@ -30,9 +30,9 @@ const NUMBER_OF_ARGUMENTS: i64 = 3;

 #[derive(Debug)]
 pub struct Arguments {
-    source_database_file: String,  // The source database in version v1.0.0 we want to migrate
-    destiny_database_file: String, // The new migrated database in version v2.0.0
-    upload_path: String,           // The relative dir where torrent files are stored
+    pub source_database_file: String,  // The source database in version v1.0.0 we want to migrate
+    pub destiny_database_file: String, // The new migrated database in version v2.0.0
+    pub upload_path: String,           // The relative dir where torrent files are stored
 }

 fn print_usage() {
@@ -96,8 +96,9 @@ pub async fn upgrade(args: &Arguments) {
     .await;
 }

-async fn current_db(connect_url: &str) -> Arc<SqliteDatabaseV1_0_0> {
-    Arc::new(SqliteDatabaseV1_0_0::new(connect_url).await)
+async fn current_db(db_filename: &str) -> Arc<SqliteDatabaseV1_0_0> {
+    let source_database_connect_url = format!("sqlite://{}?mode=ro", db_filename);
+    Arc::new(SqliteDatabaseV1_0_0::new(&source_database_connect_url).await)
 }

 async fn new_db(db_filename: &str) -> Arc<SqliteDatabaseV2_0_0> {

From 44927e5ba8b349d6538ae13b806e95002a9f8087 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Wed, 9 Nov 2022 17:12:35 +0000
Subject: [PATCH 30/53] test: [#56] WIP.
scaffolding to test upgrader command --- tests/mod.rs | 1 + .../20210831113004_torrust_users.sql | 7 ++ .../20210904135524_torrust_tracker_keys.sql | 7 ++ .../20210905160623_torrust_categories.sql | 7 ++ .../20210907083424_torrust_torrent_files.sql | 8 ++ .../20211208143338_torrust_users.sql | 2 + .../20220308083424_torrust_torrents.sql | 14 ++++ .../20220308170028_torrust_categories.sql | 2 + tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs | 1 + .../from_v1_0_0_to_v2_0_0/output/.gitignore | 1 + tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs | 77 +++++++++++++++++++ tests/upgrades/mod.rs | 1 + 12 files changed, 128 insertions(+) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs create mode 100644 tests/upgrades/mod.rs diff --git a/tests/mod.rs b/tests/mod.rs index 22adeb6d..27bea3bd 100644 --- a/tests/mod.rs +++ b/tests/mod.rs @@ -1 +1,2 @@ mod databases; +pub mod upgrades; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql new file mode 100644 index 00000000..c535dfb9 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210831113004_torrust_users.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS torrust_users ( + user_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + username VARCHAR(32) NOT NULL UNIQUE, + email VARCHAR(100) NOT NULL UNIQUE, + email_verified BOOLEAN NOT NULL DEFAULT FALSE, + password TEXT NOT NULL +) \ No newline at end of file diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql new file mode 100644 index 00000000..ef6f6865 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210904135524_torrust_tracker_keys.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS torrust_tracker_keys ( + key_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + user_id INTEGER, + key VARCHAR(32) NOT NULL, + valid_until INT(10) NOT NULL, + FOREIGN KEY(user_id) REFERENCES torrust_users(user_id) +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql new 
file mode 100644 index 00000000..c88abfe2 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210905160623_torrust_categories.sql @@ -0,0 +1,7 @@ +CREATE TABLE torrust_categories ( + category_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + name VARCHAR(64) NOT NULL UNIQUE +); + +INSERT INTO torrust_categories (name) VALUES +('movies'), ('tv shows'), ('games'), ('music'), ('software'); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql new file mode 100644 index 00000000..aeb3135a --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20210907083424_torrust_torrent_files.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS torrust_torrent_files ( + file_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + torrent_id INTEGER NOT NULL, + number INTEGER NOT NULL, + path VARCHAR(255) NOT NULL, + length INTEGER NOT NULL, + FOREIGN KEY(torrent_id) REFERENCES torrust_torrents(torrent_id) +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql new file mode 100644 index 00000000..0b574c69 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20211208143338_torrust_users.sql @@ -0,0 +1,2 @@ +ALTER TABLE torrust_users +ADD COLUMN administrator BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql new file mode 100644 index 00000000..413539a4 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308083424_torrust_torrents.sql @@ -0,0 +1,14 @@ +CREATE TABLE IF NOT EXISTS torrust_torrents ( + torrent_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + uploader VARCHAR(32) NOT NULL, + info_hash VARCHAR(20) UNIQUE NOT NULL, + title VARCHAR(256) UNIQUE NOT NULL, + category_id INTEGER NOT NULL, + description TEXT, + upload_date INT(10) NOT NULL, + file_size BIGINT NOT NULL, + seeders INTEGER NOT NULL, + leechers INTEGER NOT NULL, + FOREIGN KEY(uploader) REFERENCES torrust_users(username) ON DELETE CASCADE, + FOREIGN KEY(category_id) REFERENCES torrust_categories(category_id) ON DELETE CASCADE +) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql new file mode 100644 index 00000000..b786dcd2 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/database/v1.0.0/migrations/20220308170028_torrust_categories.sql @@ -0,0 +1,2 @@ +ALTER TABLE torrust_categories +ADD COLUMN icon VARCHAR(32); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs new file mode 100644 index 00000000..3023529a --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -0,0 +1 @@ +pub mod tests; \ No newline at end of file diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore b/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore new file mode 100644 
index 00000000..3997bead --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/output/.gitignore @@ -0,0 +1 @@ +*.db \ No newline at end of file diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs new file mode 100644 index 00000000..3ab90e32 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs @@ -0,0 +1,77 @@ +//! You can run this test with: +//! +//! ```text +//! cargo test upgrade_data_from_version_v1_0_0_to_v2_0_0 -- --nocapture +//! ``` +use std::fs; +use std::sync::Arc; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{upgrade, Arguments}; + +#[tokio::test] +async fn upgrade_data_from_version_v1_0_0_to_v2_0_0() { + /* TODO: + * - Insert data: user, tracker key and torrent + * - Assertions + */ + let fixtures_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/".to_string(); + let debug_output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string(); + + let source_database_file = format!("{}source.db", debug_output_dir); + let destiny_database_file = format!("{}destiny.db", debug_output_dir); + + // TODO: use a unique temporary dir + fs::remove_file(&source_database_file).expect("Can't remove source DB file."); + fs::remove_file(&destiny_database_file).expect("Can't remove destiny DB file."); + + let source_database = source_db_connection(&source_database_file).await; + + migrate(source_database.clone(), &fixtures_dir).await; + + let args = Arguments { + source_database_file, + destiny_database_file, + upload_path: format!("{}uploads/", fixtures_dir), + }; + + upgrade(&args).await; +} + +async fn source_db_connection(source_database_file: &str) -> Arc { + let source_database_connect_url = format!("sqlite://{}?mode=rwc", source_database_file); + Arc::new(SqliteDatabaseV1_0_0::new(&source_database_connect_url).await) +} + +/// Execute migrations for database in version v1.0.0 +async fn migrate(source_database: Arc, fixtures_dir: &str) { + let migrations_dir = format!("{}database/v1.0.0/migrations/", fixtures_dir); + + let migrations = vec![ + "20210831113004_torrust_users.sql", + "20210904135524_torrust_tracker_keys.sql", + "20210905160623_torrust_categories.sql", + "20210907083424_torrust_torrent_files.sql", + "20211208143338_torrust_users.sql", + "20220308083424_torrust_torrents.sql", + "20220308170028_torrust_categories.sql", + ]; + + for migration_file_name in &migrations { + let migration_file_path = format!("{}{}", &migrations_dir, &migration_file_name); + run_migration_from_file(source_database.clone(), &migration_file_path).await; + } +} + +async fn run_migration_from_file( + source_database: Arc, + migration_file_path: &str, +) { + println!("Executing migration: {:?}", migration_file_path); + + let sql = + fs::read_to_string(migration_file_path).expect("Should have been able to read the file"); + + let res = sqlx::query(&sql).execute(&source_database.pool).await; + + println!("Migration result {:?}", res); +} diff --git a/tests/upgrades/mod.rs b/tests/upgrades/mod.rs new file mode 100644 index 00000000..736d54f6 --- /dev/null +++ b/tests/upgrades/mod.rs @@ -0,0 +1 @@ +pub mod from_v1_0_0_to_v2_0_0; \ No newline at end of file From 6188b101d021ded52f9d4c900f6ff62b6cede4db Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 18:33:03 +0000 Subject: [PATCH 31/53] refactor: extract mod sqlite_v1_0_0 in tests --- tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs | 
3 +- .../from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs | 53 +++++++++++++++++++ tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs | 41 ++------------ 3 files changed, 58 insertions(+), 39 deletions(-) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs index 3023529a..bb1d6613 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -1 +1,2 @@ -pub mod tests; \ No newline at end of file +pub mod sqlite_v1_0_0; +pub mod tests; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs new file mode 100644 index 00000000..1904df6c --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs @@ -0,0 +1,53 @@ +use sqlx::sqlite::SqlitePoolOptions; +use sqlx::SqlitePool; +use std::fs; + +pub struct SqliteDatabaseV1_0_0 { + pub pool: SqlitePool, +} + +impl SqliteDatabaseV1_0_0 { + pub async fn db_connection(source_database_file: &str) -> Self { + let source_database_connect_url = format!("sqlite://{}?mode=rwc", source_database_file); + SqliteDatabaseV1_0_0::new(&source_database_connect_url).await + } + + pub async fn new(database_url: &str) -> Self { + let db = SqlitePoolOptions::new() + .connect(database_url) + .await + .expect("Unable to create database pool."); + Self { pool: db } + } + + /// Execute migrations for database in version v1.0.0 + pub async fn migrate(&self, fixtures_dir: &str) { + let migrations_dir = format!("{}database/v1.0.0/migrations/", fixtures_dir); + + let migrations = vec![ + "20210831113004_torrust_users.sql", + "20210904135524_torrust_tracker_keys.sql", + "20210905160623_torrust_categories.sql", + "20210907083424_torrust_torrent_files.sql", + "20211208143338_torrust_users.sql", + "20220308083424_torrust_torrents.sql", + "20220308170028_torrust_categories.sql", + ]; + + for migration_file_name in &migrations { + let migration_file_path = format!("{}{}", &migrations_dir, &migration_file_name); + self.run_migration_from_file(&migration_file_path).await; + } + } + + async fn run_migration_from_file(&self, migration_file_path: &str) { + println!("Executing migration: {:?}", migration_file_path); + + let sql = fs::read_to_string(migration_file_path) + .expect("Should have been able to read the file"); + + let res = sqlx::query(&sql).execute(&self.pool).await; + + println!("Migration result {:?}", res); + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs index 3ab90e32..79cfc866 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs @@ -3,9 +3,9 @@ //! ```text //! cargo test upgrade_data_from_version_v1_0_0_to_v2_0_0 -- --nocapture //! 
``` +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use std::fs; use std::sync::Arc; -use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{upgrade, Arguments}; #[tokio::test] @@ -26,7 +26,7 @@ async fn upgrade_data_from_version_v1_0_0_to_v2_0_0() { let source_database = source_db_connection(&source_database_file).await; - migrate(source_database.clone(), &fixtures_dir).await; + source_database.migrate(&fixtures_dir).await; let args = Arguments { source_database_file, @@ -38,40 +38,5 @@ async fn upgrade_data_from_version_v1_0_0_to_v2_0_0() { } async fn source_db_connection(source_database_file: &str) -> Arc { - let source_database_connect_url = format!("sqlite://{}?mode=rwc", source_database_file); - Arc::new(SqliteDatabaseV1_0_0::new(&source_database_connect_url).await) -} - -/// Execute migrations for database in version v1.0.0 -async fn migrate(source_database: Arc, fixtures_dir: &str) { - let migrations_dir = format!("{}database/v1.0.0/migrations/", fixtures_dir); - - let migrations = vec![ - "20210831113004_torrust_users.sql", - "20210904135524_torrust_tracker_keys.sql", - "20210905160623_torrust_categories.sql", - "20210907083424_torrust_torrent_files.sql", - "20211208143338_torrust_users.sql", - "20220308083424_torrust_torrents.sql", - "20220308170028_torrust_categories.sql", - ]; - - for migration_file_name in &migrations { - let migration_file_path = format!("{}{}", &migrations_dir, &migration_file_name); - run_migration_from_file(source_database.clone(), &migration_file_path).await; - } -} - -async fn run_migration_from_file( - source_database: Arc, - migration_file_path: &str, -) { - println!("Executing migration: {:?}", migration_file_path); - - let sql = - fs::read_to_string(migration_file_path).expect("Should have been able to read the file"); - - let res = sqlx::query(&sql).execute(&source_database.pool).await; - - println!("Migration result {:?}", res); + Arc::new(SqliteDatabaseV1_0_0::db_connection(&source_database_file).await) } From f9931077ce9f6fde38e86af1fc003022113a614a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Nov 2022 19:29:08 +0000 Subject: [PATCH 32/53] tests: [#56] for users table in upgrader --- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 19 ++- tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs | 1 + .../from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs | 23 +++- .../from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs | 37 +++++ tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs | 130 +++++++++++++++--- 5 files changed, 184 insertions(+), 26 deletions(-) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 55a8821d..bf3754fe 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -71,10 +71,11 @@ fn parse_args() -> Arguments { } pub async fn run_upgrader() { - upgrade(&parse_args()).await + let now = datetime_iso_8601(); + upgrade(&parse_args(), &now).await; } -pub async fn upgrade(args: &Arguments) { +pub async fn upgrade(args: &Arguments, date_imported: &str) { // Get connection to source database (current DB in settings) let source_database = current_db(&args.source_database_file).await; @@ -86,7 +87,12 @@ pub async fn upgrade(args: &Arguments) { migrate_destiny_database(dest_database.clone()).await; 
reset_destiny_database(dest_database.clone()).await; transfer_categories(source_database.clone(), dest_database.clone()).await; - transfer_user_data(source_database.clone(), dest_database.clone()).await; + transfer_user_data( + source_database.clone(), + dest_database.clone(), + date_imported, + ) + .await; transfer_tracker_keys(source_database.clone(), dest_database.clone()).await; transfer_torrents( source_database.clone(), @@ -158,6 +164,7 @@ async fn transfer_categories( async fn transfer_user_data( source_database: Arc, dest_database: Arc, + date_imported: &str, ) { println!("Transferring users ..."); @@ -173,8 +180,6 @@ async fn transfer_user_data( &user.username, &user.user_id ); - let date_imported = today_iso8601(); - let id = dest_database .insert_imported_user(user.user_id, &date_imported, user.administrator) .await @@ -238,7 +243,9 @@ async fn transfer_user_data( } } -fn today_iso8601() -> String { +/// Current datetime in ISO8601 without time zone. +/// For example: 2022-11-10 10:35:15 +pub fn datetime_iso_8601() -> String { let dt: DateTime = SystemTime::now().into(); format!("{}", dt.format("%Y-%m-%d %H:%M:%S")) } diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs index bb1d6613..0a1f301b 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -1,2 +1,3 @@ pub mod sqlite_v1_0_0; +pub mod sqlite_v2_0_0; pub mod tests; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs index 1904df6c..6da98170 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs @@ -1,15 +1,16 @@ use sqlx::sqlite::SqlitePoolOptions; -use sqlx::SqlitePool; +use sqlx::{query, SqlitePool}; use std::fs; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1; pub struct SqliteDatabaseV1_0_0 { pub pool: SqlitePool, } impl SqliteDatabaseV1_0_0 { - pub async fn db_connection(source_database_file: &str) -> Self { - let source_database_connect_url = format!("sqlite://{}?mode=rwc", source_database_file); - SqliteDatabaseV1_0_0::new(&source_database_connect_url).await + pub async fn db_connection(database_file: &str) -> Self { + let connect_url = format!("sqlite://{}?mode=rwc", database_file); + Self::new(&connect_url).await } pub async fn new(database_url: &str) -> Self { @@ -24,6 +25,7 @@ impl SqliteDatabaseV1_0_0 { pub async fn migrate(&self, fixtures_dir: &str) { let migrations_dir = format!("{}database/v1.0.0/migrations/", fixtures_dir); + // TODO: read files from dir let migrations = vec![ "20210831113004_torrust_users.sql", "20210904135524_torrust_tracker_keys.sql", @@ -50,4 +52,17 @@ impl SqliteDatabaseV1_0_0 { println!("Migration result {:?}", res); } + + pub async fn insert_user(&self, user: &UserRecordV1) -> Result { + query("INSERT INTO torrust_users (user_id, username, email, email_verified, password, administrator) VALUES (?, ?, ?, ?, ?, ?)") + .bind(user.user_id) + .bind(user.username.clone()) + .bind(user.email.clone()) + .bind(user.email_verified) + .bind(user.password.clone()) + .bind(user.administrator) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } } diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs new file mode 100644 index 00000000..ba6f4831 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs 
@@ -0,0 +1,37 @@
+use serde::{Deserialize, Serialize};
+use sqlx::sqlite::SqlitePoolOptions;
+use sqlx::{query_as, SqlitePool};
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct UserRecordV2 {
+    pub user_id: i64,
+    pub date_registered: Option<String>,
+    pub date_imported: Option<String>,
+    pub administrator: bool,
+}
+
+pub struct SqliteDatabaseV2_0_0 {
+    pub pool: SqlitePool,
+}
+
+impl SqliteDatabaseV2_0_0 {
+    pub async fn db_connection(database_file: &str) -> Self {
+        let connect_url = format!("sqlite://{}?mode=rwc", database_file);
+        Self::new(&connect_url).await
+    }
+
+    pub async fn new(database_url: &str) -> Self {
+        let db = SqlitePoolOptions::new()
+            .connect(database_url)
+            .await
+            .expect("Unable to create database pool.");
+        Self { pool: db }
+    }
+
+    pub async fn get_user(&self, user_id: i64) -> Result<UserRecordV2, sqlx::Error> {
+        query_as::<_, UserRecordV2>("SELECT * FROM torrust_users WHERE user_id = ?")
+            .bind(user_id)
+            .fetch_one(&self.pool)
+            .await
+    }
+}
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs
index 79cfc866..e0f5f3bc 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs
@@ -4,39 +4,137 @@
 //! cargo test upgrade_data_from_version_v1_0_0_to_v2_0_0 -- --nocapture
 //! ```
 use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+use argon2::password_hash::SaltString;
+use argon2::{Argon2, PasswordHasher};
+use rand_core::OsRng;
 use std::fs;
 use std::sync::Arc;
-use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{upgrade, Arguments};
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1;
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{
+    datetime_iso_8601, upgrade, Arguments,
+};
 
 #[tokio::test]
 async fn upgrade_data_from_version_v1_0_0_to_v2_0_0() {
-    /* TODO:
-     * - Insert data: user, tracker key and torrent
-     * - Assertions
-     */
+    // Directories
     let fixtures_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/".to_string();
-    let debug_output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string();
+    let output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string();
 
-    let source_database_file = format!("{}source.db", debug_output_dir);
-    let destiny_database_file = format!("{}destiny.db", debug_output_dir);
-
-    // TODO: use a unique temporary dir
-    fs::remove_file(&source_database_file).expect("Can't remove source DB file.");
-    fs::remove_file(&destiny_database_file).expect("Can't remove destiny DB file.");
+    // Files
+    let source_database_file = format!("{}source.db", output_dir);
+    let destiny_database_file = format!("{}destiny.db", output_dir);
 
+    // Set up clean database
+    reset_databases(&source_database_file, &destiny_database_file);
     let source_database = source_db_connection(&source_database_file).await;
     source_database.migrate(&fixtures_dir).await;
 
+    // Load data into database v1
+
+    // `torrust_users` table
+
+    let user = UserRecordV1 {
+        user_id: 1,
+        username: "user01".to_string(),
+        email: "user01@torrust.com".to_string(),
+        email_verified: true,
+        password: hashed_valid_password(),
+        administrator: true,
+    };
+    let user_id = source_database.insert_user(&user).await.unwrap();
+
+    // `torrust_tracker_keys` table
+
+    // TODO
+
+    // `torrust_torrents` table
+
+    // TODO
+
+    // Run the upgrader
+
     let args = Arguments {
-        source_database_file,
-
destiny_database_file, + source_database_file: source_database_file.clone(), + destiny_database_file: destiny_database_file.clone(), upload_path: format!("{}uploads/", fixtures_dir), }; + let now = datetime_iso_8601(); + upgrade(&args, &now).await; + + // Assertions in database v2 + + let destiny_database = destiny_db_connection(&destiny_database_file).await; + + // `torrust_users` table + + let imported_user = destiny_database.get_user(user_id).await.unwrap(); + + assert_eq!(imported_user.user_id, user.user_id); + assert!(imported_user.date_registered.is_none()); + assert_eq!(imported_user.date_imported.unwrap(), now); + assert_eq!(imported_user.administrator, user.administrator); + + // `torrust_user_authentication` table + + // TODO + + // `torrust_user_profiles` table + + // TODO + + // `torrust_tracker_keys` table - upgrade(&args).await; + // TODO + + // `torrust_torrents` table + + // TODO + + // `torrust_torrent_files` table + + // TODO + + // `torrust_torrent_info` table + + // TODO + + // `torrust_torrent_announce_urls` table + + // TODO } async fn source_db_connection(source_database_file: &str) -> Arc { Arc::new(SqliteDatabaseV1_0_0::db_connection(&source_database_file).await) } + +async fn destiny_db_connection(destiny_database_file: &str) -> Arc { + Arc::new(SqliteDatabaseV2_0_0::db_connection(&destiny_database_file).await) +} + +/// Reset databases from previous executions +fn reset_databases(source_database_file: &str, destiny_database_file: &str) { + // TODO: use a unique temporary dir + fs::remove_file(&source_database_file).expect("Can't remove source DB file."); + fs::remove_file(&destiny_database_file).expect("Can't remove destiny DB file."); +} + +fn hashed_valid_password() -> String { + hash_password(&valid_password()) +} + +fn valid_password() -> String { + "123456".to_string() +} + +fn hash_password(plain_password: &str) -> String { + let salt = SaltString::generate(&mut OsRng); + + // Argon2 with default params (Argon2id v19) + let argon2 = Argon2::default(); + + // Hash password to PHC string ($argon2id$v=19$...) + argon2 + .hash_password(plain_password.as_bytes(), &salt) + .unwrap() + .to_string() +} From 5d0def2943c13da54b4095d4430ed3f0d0f4f442 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Nov 2022 12:05:55 +0000 Subject: [PATCH 33/53] refactor: [#56] tests for upgrader Extract different testers for every type of data transferred. 
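Each tester follows the same shape: it owns a connection to both the source (v1.0.0) and destiny (v2.0.0) test databases together with its own fixture data, loads that data into the source database before the upgrader runs, and asserts on the destiny database afterwards. A minimal sketch of the pattern, using the names introduced in the diff below:

```rust
// Sketch only: the concrete testers under
// tests/upgrades/from_v1_0_0_to_v2_0_0/testers/ follow this structure.
pub struct UserDataTester {
    // Connection to the test database in the old schema (v1.0.0).
    source_database: Arc<SqliteDatabaseV1_0_0>,
    // Connection to the test database in the new schema (v2.0.0).
    destiny_database: Arc<SqliteDatabaseV2_0_0>,
    // The fixture rows this tester is responsible for.
    test_data: TestData,
}

impl UserDataTester {
    /// Inserts the fixture data into the source database before the upgrade.
    pub async fn load_data_into_source_db(&self) { /* ... */ }

    /// Asserts the data was transferred to the destiny database.
    pub async fn assert(&self) { /* ... */ }
}
```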
--- tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs | 3 +- .../from_v1_0_0_to_v2_0_0/testers/mod.rs | 1 + .../testers/user_data_tester.rs | 91 +++++++++++++++++++ .../{tests.rs => upgrader.rs} | 66 ++++---------- 4 files changed, 113 insertions(+), 48 deletions(-) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs rename tests/upgrades/from_v1_0_0_to_v2_0_0/{tests.rs => upgrader.rs} (62%) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs index 0a1f301b..7a5e3bb7 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -1,3 +1,4 @@ pub mod sqlite_v1_0_0; pub mod sqlite_v2_0_0; -pub mod tests; +pub mod testers; +pub mod upgrader; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs new file mode 100644 index 00000000..85968bfd --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs @@ -0,0 +1 @@ +pub mod user_data_tester; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs new file mode 100644 index 00000000..a83b8077 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs @@ -0,0 +1,91 @@ +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use argon2::password_hash::SaltString; +use argon2::{Argon2, PasswordHasher}; +use rand_core::OsRng; +use std::sync::Arc; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1; + +pub struct UserDataTester { + source_database: Arc, + destiny_database: Arc, + execution_time: String, + test_data: TestData, +} + +pub struct TestData { + pub user: UserRecordV1, +} + +impl UserDataTester { + pub fn new( + source_database: Arc, + destiny_database: Arc, + execution_time: &str, + ) -> Self { + let user = UserRecordV1 { + user_id: 1, + username: "user01".to_string(), + email: "user01@torrust.com".to_string(), + email_verified: true, + password: hashed_valid_password(), + administrator: true, + }; + + Self { + source_database, + destiny_database, + execution_time: execution_time.to_owned(), + test_data: TestData { user }, + } + } + + pub async fn load_data_into_source_db(&self) { + self.source_database + .insert_user(&self.test_data.user) + .await + .unwrap(); + } + + pub async fn assert(&self) { + self.assert_user().await; + } + + /// Table `torrust_users` + async fn assert_user(&self) { + let imported_user = self + .destiny_database + .get_user(self.test_data.user.user_id) + .await + .unwrap(); + + assert_eq!(imported_user.user_id, self.test_data.user.user_id); + assert!(imported_user.date_registered.is_none()); + assert_eq!(imported_user.date_imported.unwrap(), self.execution_time); + assert_eq!( + imported_user.administrator, + self.test_data.user.administrator + ); + } +} + +fn hashed_valid_password() -> String { + hash_password(&valid_password()) +} + +fn valid_password() -> String { + "123456".to_string() +} + +fn hash_password(plain_password: &str) -> String { + let salt = SaltString::generate(&mut OsRng); + + // Argon2 with default params (Argon2id v19) + let argon2 = Argon2::default(); + + // Hash password to PHC string ($argon2id$v=19$...) 
+ argon2 + .hash_password(plain_password.as_bytes(), &salt) + .unwrap() + .to_string() +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs similarity index 62% rename from tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs rename to tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index e0f5f3bc..4e9d4228 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/tests.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -1,22 +1,19 @@ //! You can run this test with: //! //! ```text -//! cargo test upgrade_data_from_version_v1_0_0_to_v2_0_0 -- --nocapture +//! cargo test upgrades_data_from_version_v1_0_0_to_v2_0_0 -- --nocapture //! ``` use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; -use argon2::password_hash::SaltString; -use argon2::{Argon2, PasswordHasher}; -use rand_core::OsRng; +use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_data_tester::UserDataTester; use std::fs; use std::sync::Arc; -use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1; use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{ datetime_iso_8601, upgrade, Arguments, }; #[tokio::test] -async fn upgrade_data_from_version_v1_0_0_to_v2_0_0() { +async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // Directories let fixtures_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/".to_string(); let output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string(); @@ -25,24 +22,28 @@ async fn upgrade_data_from_version_v1_0_0_to_v2_0_0() { let source_database_file = format!("{}source.db", output_dir); let destiny_database_file = format!("{}destiny.db", output_dir); - // Set up clean database + // Set up clean source database reset_databases(&source_database_file, &destiny_database_file); let source_database = source_db_connection(&source_database_file).await; source_database.migrate(&fixtures_dir).await; + // Set up connection for the destiny database + let destiny_database = destiny_db_connection(&destiny_database_file).await; + + // The datetime when the upgrader is executed + let execution_time = datetime_iso_8601(); + // Load data into database v1 // `torrust_users` table - let user = UserRecordV1 { - user_id: 1, - username: "user01".to_string(), - email: "user01@torrust.com".to_string(), - email_verified: true, - password: hashed_valid_password(), - administrator: true, - }; - let user_id = source_database.insert_user(&user).await.unwrap(); + let user_data_tester = UserDataTester::new( + source_database.clone(), + destiny_database.clone(), + &execution_time, + ); + + user_data_tester.load_data_into_source_db().await; // `torrust_tracker_keys` table @@ -58,21 +59,13 @@ async fn upgrade_data_from_version_v1_0_0_to_v2_0_0() { destiny_database_file: destiny_database_file.clone(), upload_path: format!("{}uploads/", fixtures_dir), }; - let now = datetime_iso_8601(); - upgrade(&args, &now).await; + upgrade(&args, &execution_time).await; // Assertions in database v2 - let destiny_database = destiny_db_connection(&destiny_database_file).await; - // `torrust_users` table - let imported_user = destiny_database.get_user(user_id).await.unwrap(); - - assert_eq!(imported_user.user_id, user.user_id); - assert!(imported_user.date_registered.is_none()); - assert_eq!(imported_user.date_imported.unwrap(), now); - assert_eq!(imported_user.administrator, user.administrator); + 
user_data_tester.assert().await; // `torrust_user_authentication` table @@ -117,24 +110,3 @@ fn reset_databases(source_database_file: &str, destiny_database_file: &str) { fs::remove_file(&source_database_file).expect("Can't remove source DB file."); fs::remove_file(&destiny_database_file).expect("Can't remove destiny DB file."); } - -fn hashed_valid_password() -> String { - hash_password(&valid_password()) -} - -fn valid_password() -> String { - "123456".to_string() -} - -fn hash_password(plain_password: &str) -> String { - let salt = SaltString::generate(&mut OsRng); - - // Argon2 with default params (Argon2id v19) - let argon2 = Argon2::default(); - - // Hash password to PHC string ($argon2id$v=19$...) - argon2 - .hash_password(plain_password.as_bytes(), &salt) - .unwrap() - .to_string() -} From 0a58b6cbe6b1d64ab5615641c5fc853209176a21 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Nov 2022 12:15:00 +0000 Subject: [PATCH 34/53] fix: [#56] bio and avatar is user profile should be NULL for imported users --- .../from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs | 6 +----- src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs | 7 +------ 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 21dc28ff..b7d1a570 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -136,17 +136,13 @@ impl SqliteDatabaseV2_0_0 { user_id: i64, username: &str, email: &str, - email_verified: bool, - bio: &str, - avatar: &str, + email_verified: bool ) -> Result { query("INSERT INTO torrust_user_profiles (user_id, username, email, email_verified, bio, avatar) VALUES (?, ?, ?, ?, ?, ?)") .bind(user_id) .bind(username) .bind(email) .bind(email_verified) - .bind(bio) - .bind(avatar) .execute(&self.pool) .await .map(|v| v.last_insert_rowid()) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index bf3754fe..48048973 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -181,7 +181,7 @@ async fn transfer_user_data( ); let id = dest_database - .insert_imported_user(user.user_id, &date_imported, user.administrator) + .insert_imported_user(user.user_id, date_imported, user.administrator) .await .unwrap(); @@ -204,17 +204,12 @@ async fn transfer_user_data( &user.username, &user.user_id ); - let default_user_bio = "".to_string(); - let default_user_avatar = "".to_string(); - dest_database .insert_user_profile( user.user_id, &user.username, &user.email, user.email_verified, - &default_user_bio, - &default_user_avatar, ) .await .unwrap(); From 8d74e6683c15b67674a652fc2df05d36a8dfe1fd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Nov 2022 12:28:56 +0000 Subject: [PATCH 35/53] tests: [#56] for users profile and auth tables in upgrader --- .../from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs | 35 +++++++++++++++++ .../testers/user_data_tester.rs | 39 +++++++++++++++++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 8 ++++ 3 files changed, 82 insertions(+) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs index ba6f4831..87363cea 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs @@ -10,6 +10,22 @@ pub struct UserRecordV2 { pub 
administrator: bool,
 }
 
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct UserProfileRecordV2 {
+    pub user_id: i64,
+    pub username: String,
+    pub email: String,
+    pub email_verified: bool,
+    pub bio: Option<String>,
+    pub avatar: Option<String>,
+}
+
+#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
+pub struct UserAuthenticationRecordV2 {
+    pub user_id: i64,
+    pub password_hash: String,
+}
+
 pub struct SqliteDatabaseV2_0_0 {
     pub pool: SqlitePool,
 }
@@ -34,4 +50,23 @@ impl SqliteDatabaseV2_0_0 {
         .fetch_one(&self.pool)
         .await
     }
+
+    pub async fn get_user_profile(&self, user_id: i64) -> Result<UserProfileRecordV2, sqlx::Error> {
+        query_as::<_, UserProfileRecordV2>("SELECT * FROM torrust_user_profiles WHERE user_id = ?")
+            .bind(user_id)
+            .fetch_one(&self.pool)
+            .await
+    }
+
+    pub async fn get_user_authentication(
+        &self,
+        user_id: i64,
+    ) -> Result<UserAuthenticationRecordV2, sqlx::Error> {
+        query_as::<_, UserAuthenticationRecordV2>(
+            "SELECT * FROM torrust_user_authentication WHERE user_id = ?",
+        )
+        .bind(user_id)
+        .fetch_one(&self.pool)
+        .await
+    }
 }
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs
index a83b8077..3f70081c 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs
@@ -49,6 +49,8 @@ impl UserDataTester {
 
     pub async fn assert(&self) {
         self.assert_user().await;
+        self.assert_user_profile().await;
+        self.assert_user_authentication().await;
     }
 
     /// Table `torrust_users`
@@ -67,6 +69,43 @@ impl UserDataTester {
             self.test_data.user.administrator
         );
     }
+
+    /// Table `torrust_user_profiles`
+    async fn assert_user_profile(&self) {
+        let imported_user_profile = self
+            .destiny_database
+            .get_user_profile(self.test_data.user.user_id)
+            .await
+            .unwrap();
+
+        assert_eq!(imported_user_profile.user_id, self.test_data.user.user_id);
+        assert_eq!(imported_user_profile.username, self.test_data.user.username);
+        assert_eq!(imported_user_profile.email, self.test_data.user.email);
+        assert_eq!(
+            imported_user_profile.email_verified,
+            self.test_data.user.email_verified
+        );
+        assert!(imported_user_profile.bio.is_none());
+        assert!(imported_user_profile.avatar.is_none());
+    }
+
+    /// Table `torrust_user_authentication`
+    async fn assert_user_authentication(&self) {
+        let imported_user_authentication = self
+            .destiny_database
+            .get_user_authentication(self.test_data.user.user_id)
+            .await
+            .unwrap();
+
+        assert_eq!(
+            imported_user_authentication.user_id,
+            self.test_data.user.user_id
+        );
+        assert_eq!(
+            imported_user_authentication.password_hash,
+            self.test_data.user.password
+        );
+    }
 }
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
index 4e9d4228..b0976944 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
@@ -1,8 +1,16 @@
 //! You can run this test with:
 //!
 //! ```text
+//! cargo test upgrades_data_from_version_v1_0_0_to_v2_0_0
+//! ```
+//!
+//! or:
+//!
+//! ```text
 //! cargo test upgrades_data_from_version_v1_0_0_to_v2_0_0 -- --nocapture
 //! ```
+//!
+//! to see the "upgrader" command output.
use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_data_tester::UserDataTester; From eef980c3ce9c529c5c37fe512a8afdc4a24e3a2d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Nov 2022 12:55:27 +0000 Subject: [PATCH 36/53] tests: [#56] for tracker keys table in upgrader --- .../from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs | 18 +++++- .../from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs | 20 ++++++ .../from_v1_0_0_to_v2_0_0/testers/mod.rs | 1 + .../testers/tracker_keys_tester.rs | 62 +++++++++++++++++++ .../testers/user_data_tester.rs | 2 +- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 23 ++++--- 6 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_keys_tester.rs diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs index 6da98170..cc286a20 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs @@ -1,7 +1,9 @@ use sqlx::sqlite::SqlitePoolOptions; use sqlx::{query, SqlitePool}; use std::fs; -use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{ + TrackerKeyRecordV1, UserRecordV1, +}; pub struct SqliteDatabaseV1_0_0 { pub pool: SqlitePool, @@ -65,4 +67,18 @@ impl SqliteDatabaseV1_0_0 { .await .map(|v| v.last_insert_rowid()) } + + pub async fn insert_tracker_key( + &self, + tracker_key: &TrackerKeyRecordV1, + ) -> Result { + query("INSERT INTO torrust_tracker_keys (key_id, user_id, key, valid_until) VALUES (?, ?, ?, ?)") + .bind(tracker_key.key_id) + .bind(tracker_key.user_id) + .bind(tracker_key.key.clone()) + .bind(tracker_key.valid_until) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } } diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs index 87363cea..1f3c25a7 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs @@ -26,6 +26,14 @@ pub struct UserAuthenticationRecordV2 { pub password_hash: String, } +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TrackerKeyRecordV2 { + pub tracker_key_id: i64, + pub user_id: i64, + pub tracker_key: String, + pub date_expiry: i64, +} + pub struct SqliteDatabaseV2_0_0 { pub pool: SqlitePool, } @@ -69,4 +77,16 @@ impl SqliteDatabaseV2_0_0 { .fetch_one(&self.pool) .await } + + pub async fn get_tracker_key( + &self, + tracker_key_id: i64, + ) -> Result { + query_as::<_, TrackerKeyRecordV2>( + "SELECT * FROM torrust_tracker_keys WHERE user_id = ?", + ) + .bind(tracker_key_id) + .fetch_one(&self.pool) + .await + } } diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs index 85968bfd..7285ed3c 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs @@ -1 +1,2 @@ +pub mod tracker_keys_tester; pub mod user_data_tester; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_keys_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_keys_tester.rs new file mode 100644 index 00000000..dd6eefdb --- /dev/null +++ 
b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_keys_tester.rs @@ -0,0 +1,62 @@ +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use std::sync::Arc; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::TrackerKeyRecordV1; + +pub struct TrackerKeysTester { + source_database: Arc, + destiny_database: Arc, + test_data: TestData, +} + +pub struct TestData { + pub tracker_key: TrackerKeyRecordV1, +} + +impl TrackerKeysTester { + pub fn new( + source_database: Arc, + destiny_database: Arc, + user_id: i64, + ) -> Self { + let tracker_key = TrackerKeyRecordV1 { + key_id: 1, + user_id, + key: "rRstSTM5rx0sgxjLkRSJf3rXODcRBI5T".to_string(), + valid_until: 2456956800, // 11-10-2047 00:00:00 UTC + }; + + Self { + source_database, + destiny_database, + test_data: TestData { tracker_key }, + } + } + + pub async fn load_data_into_source_db(&self) { + self.source_database + .insert_tracker_key(&self.test_data.tracker_key) + .await + .unwrap(); + } + + /// Table `torrust_tracker_keys` + pub async fn assert(&self) { + let imported_key = self + .destiny_database + .get_tracker_key(self.test_data.tracker_key.key_id) + .await + .unwrap(); + + assert_eq!( + imported_key.tracker_key_id, + self.test_data.tracker_key.key_id + ); + assert_eq!(imported_key.user_id, self.test_data.tracker_key.user_id); + assert_eq!(imported_key.tracker_key, self.test_data.tracker_key.key); + assert_eq!( + imported_key.date_expiry, + self.test_data.tracker_key.valid_until + ); + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs index 3f70081c..1f6f7238 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs @@ -10,7 +10,7 @@ pub struct UserDataTester { source_database: Arc, destiny_database: Arc, execution_time: String, - test_data: TestData, + pub test_data: TestData, } pub struct TestData { diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index b0976944..d0314328 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -13,6 +13,7 @@ //! to see the "upgrader" command output. 
use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::tracker_keys_tester::TrackerKeysTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_data_tester::UserDataTester; use std::fs; use std::sync::Arc; @@ -43,7 +44,7 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // Load data into database v1 - // `torrust_users` table + // `torrust_users`, `torrust_user_profiles` and `torrust_user_authentication` tables let user_data_tester = UserDataTester::new( source_database.clone(), @@ -55,7 +56,13 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // `torrust_tracker_keys` table - // TODO + let tracker_keys_tester = TrackerKeysTester::new( + source_database.clone(), + destiny_database.clone(), + user_data_tester.test_data.user.user_id, + ); + + tracker_keys_tester.load_data_into_source_db().await; // `torrust_torrents` table @@ -71,21 +78,13 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // Assertions in database v2 - // `torrust_users` table + // `torrust_users`, `torrust_user_profiles` and `torrust_user_authentication` tables user_data_tester.assert().await; - // `torrust_user_authentication` table - - // TODO - - // `torrust_user_profiles` table - - // TODO - // `torrust_tracker_keys` table - // TODO + tracker_keys_tester.assert().await; // `torrust_torrents` table From f0f581faebb48823e46edb0408214591152d0225 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Nov 2022 17:46:27 +0000 Subject: [PATCH 37/53] tests: [#56] for torrents table in upgrader --- src/models/torrent_file.rs | 4 +- .../databases/sqlite_v1_0_0.rs | 2 +- .../databases/sqlite_v2_0_0.rs | 5 +- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 10 +- .../fixtures/uploads/1.torrent | Bin 0 -> 1128 bytes .../from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs | 32 ++++- .../from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs | 18 ++- .../from_v1_0_0_to_v2_0_0/testers/mod.rs | 1 + .../testers/torrent_tester.rs | 115 ++++++++++++++++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 42 ++----- 10 files changed, 183 insertions(+), 46 deletions(-) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs diff --git a/src/models/torrent_file.rs b/src/models/torrent_file.rs index c7ab26a7..62319036 100644 --- a/src/models/torrent_file.rs +++ b/src/models/torrent_file.rs @@ -45,14 +45,14 @@ impl TorrentInfo { pub fn get_pieces_as_string(&self) -> String { match &self.pieces { None => "".to_string(), - Some(byte_buf) => bytes_to_hex(byte_buf.as_ref()) + Some(byte_buf) => bytes_to_hex(byte_buf.as_ref()), } } pub fn get_root_hash_as_i64(&self) -> i64 { match &self.root_hash { None => 0i64, - Some(root_hash) => root_hash.parse::().unwrap() + Some(root_hash) => root_hash.parse::().unwrap(), } } } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs index 3328fd43..3d42a4b3 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -10,7 +10,7 @@ pub struct CategoryRecord { pub name: String, } -#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, Clone)] pub struct UserRecordV1 { pub user_id: i64, pub username: String, diff --git 
a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index b7d1a570..bee97bc2 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -14,6 +14,7 @@ pub struct CategoryRecordV2 { pub name: String, } +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] pub struct TorrentRecordV2 { pub torrent_id: i64, pub uploader_id: i64, @@ -50,7 +51,7 @@ impl TorrentRecordV2 { } } -fn convert_timestamp_to_datetime(timestamp: i64) -> String { +pub fn convert_timestamp_to_datetime(timestamp: i64) -> String { // The expected format in database is: 2022-11-04 09:53:57 // MySQL uses a DATETIME column and SQLite uses a TEXT column. @@ -136,7 +137,7 @@ impl SqliteDatabaseV2_0_0 { user_id: i64, username: &str, email: &str, - email_verified: bool + email_verified: bool, ) -> Result { query("INSERT INTO torrust_user_profiles (user_id, username, email, email_verified, bio, avatar) VALUES (?, ?, ?, ?, ?, ?)") .bind(user_id) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 48048973..cfb17be9 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -326,7 +326,13 @@ async fn transfer_torrents( let filepath = format!("{}/{}.torrent", upload_path, &torrent.torrent_id); - let torrent_from_file = read_torrent_from_file(&filepath).unwrap(); + let torrent_from_file_result = read_torrent_from_file(&filepath); + + if torrent_from_file_result.is_err() { + panic!("Error torrent file not found: {:?}", &filepath); + } + + let torrent_from_file = torrent_from_file_result.unwrap(); let id = dest_database .insert_torrent(&TorrentRecordV2::from_v1_data( @@ -463,7 +469,7 @@ async fn transfer_torrents( println!("Torrents transferred"); } -fn read_torrent_from_file(path: &str) -> Result> { +pub fn read_torrent_from_file(path: &str) -> Result> { let contents = match fs::read(path) { Ok(contents) => contents, Err(e) => return Err(e.into()), diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/1.torrent new file mode 100644 index 0000000000000000000000000000000000000000..faa30f4cda2290bada6c2462f8e7782dae6246b3 GIT binary patch literal 1128 zcmYeXuu9C!%P-AKPBk&GDorV{($_C3N=(j9Ez$+@^wN_uOG@-I^V0IIEDZEvN)3(S zx^#0gi%W8HNYHF#3AHCDKRGccBfq%B%FNQt5^lJWIaJ=z$Xw4r&k$8MHPzfIIX^cy zHLt|T%*r=0FC{f6sVKihAvm=}!N|bGq5?z|R2UjqB^RY8mZYXABvl$(SQR<}J(6Ek z1k|TcW};`JX9)BJSVd-jo>eWrI%!68k!pcB~uLztqL+zlT(Woit_VIjE$_$-KiF4 ztKjTSVv9M{p>vJ(hyAt3*B_jVX)O|{y~?ASmztW2VTO@aZfZ%QLRo52G0>HnMnHGv zCFZ8$af+c4#99TIYs`!-On?SM+yZo&Rqez=-nqWJI%bC`gj`iC>Wyl%f2erB{P_3X zIpxz7(^M^v-aNayo8x_t%Ir;FkDS%I&wYUB!`k~r6P|8!^ij@npWQd#Z50o1-^JUU+ob&py3nhBbRX{CuSR^Gw6~5C)c?t(h|Aod?VOuC3aixa?5F%+=fC z6Z9QcsO~dbY&(~or!r&4mAT2D>lGr>)beL!=Ng%ut>qAnle)mb8NS-US|Q=!9ijGX zpP#$>7o>BV8HRpR_?EL&G+(E4YNl9lW3A!GZF|j~&IodE`lIc+v~}B`vJ#icsR-{U zRssVpg@|xBHZ`(Z?%Km>dsE(G;~L+@gzf$}B4VQ@nfr?F{}yw$H~#D>a^kZZyJX*t zi}nv&llRvAX)`ywz3zXq;Kj*`yt|GZ5U(-P^X)%#}n zZEMQkXtY&_&!Rv&L)-JlR|h$}Xx0_Xj-MCLbd>N{W$T{5Vr4x?K=}Xgq?naEzBQKh ztxj>R-kro?<9WNjDM;SoT-94uljNBAWeG>?*ZwcrfAiSI*(bB>?)83XxFly?GTC8H zLTC43DTcQ-jQYpePJ|Yp2v2R6xqs+c_t|eh&GPw6uI@Z;OCv#fOzfC z2FwaYvz<7PD13Z;rTF0I-TX}zq4N`8)r;iv{t- Result { + query( + "INSERT INTO torrust_torrents ( + torrent_id, + uploader, + info_hash, + title, + category_id, + description, + upload_date, + file_size, 
+            seeders,
+            leechers
+        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
+        )
+        .bind(torrent.torrent_id)
+        .bind(torrent.uploader.clone())
+        .bind(torrent.info_hash.clone())
+        .bind(torrent.title.clone())
+        .bind(torrent.category_id)
+        .bind(torrent.description.clone())
+        .bind(torrent.upload_date)
+        .bind(torrent.file_size)
+        .bind(torrent.seeders)
+        .bind(torrent.leechers)
+        .execute(&self.pool)
+        .await
+        .map(|v| v.last_insert_rowid())
+    }
 }
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs
index 1f3c25a7..17331572 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs
@@ -1,6 +1,7 @@
 use serde::{Deserialize, Serialize};
 use sqlx::sqlite::SqlitePoolOptions;
 use sqlx::{query_as, SqlitePool};
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::TorrentRecordV2;
 
 #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
 pub struct UserRecordV2 {
@@ -82,11 +83,16 @@ impl SqliteDatabaseV2_0_0 {
         &self,
         tracker_key_id: i64,
     ) -> Result<TrackerKeyRecordV2, sqlx::Error> {
-        query_as::<_, TrackerKeyRecordV2>(
-            "SELECT * FROM torrust_tracker_keys WHERE user_id = ?",
-        )
-        .bind(tracker_key_id)
-        .fetch_one(&self.pool)
-        .await
+        query_as::<_, TrackerKeyRecordV2>("SELECT * FROM torrust_tracker_keys WHERE user_id = ?")
+            .bind(tracker_key_id)
+            .fetch_one(&self.pool)
+            .await
+    }
+
+    pub async fn get_torrent(&self, torrent_id: i64) -> Result<TorrentRecordV2, sqlx::Error> {
+        query_as::<_, TorrentRecordV2>("SELECT * FROM torrust_torrents WHERE torrent_id = ?")
+            .bind(torrent_id)
+            .fetch_one(&self.pool)
+            .await
+    }
 }
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs
index 7285ed3c..6445ec5b 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs
@@ -1,2 +1,3 @@
+pub mod torrent_tester;
 pub mod tracker_keys_tester;
 pub mod user_data_tester;
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs
new file mode 100644
index 00000000..d6c14045
--- /dev/null
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs
@@ -0,0 +1,115 @@
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+use std::sync::Arc;
+use torrust_index_backend::models::torrent_file::Torrent;
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{
+    TorrentRecordV1, UserRecordV1,
+};
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::convert_timestamp_to_datetime;
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::read_torrent_from_file;
+
+pub struct TorrentTester {
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    destiny_database: Arc<SqliteDatabaseV2_0_0>,
+    test_data: TestData,
+}
+
+pub struct TestData {
+    pub torrent: TorrentRecordV1,
+    pub user: UserRecordV1,
+}
+
+impl TorrentTester {
+    pub fn new(
+        source_database: Arc<SqliteDatabaseV1_0_0>,
+        destiny_database: Arc<SqliteDatabaseV2_0_0>,
+        user: &UserRecordV1,
+    ) -> Self {
+        let torrent = TorrentRecordV1 {
+            torrent_id: 1,
+            uploader: user.username.clone(),
+            info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(),
+            title: "title".to_string(),
+            category_id: 1,
+            description: "description".to_string(),
+            upload_date: 1667546358, // 2022-11-04 07:19:18
+            file_size: 9219566,
+            seeders: 0,
+            leechers: 0,
+        };
+
+        Self {
source_database, + destiny_database, + test_data: TestData { + torrent, + user: user.clone(), + }, + } + } + + pub async fn load_data_into_source_db(&self) { + self.source_database + .insert_torrent(&self.test_data.torrent) + .await + .unwrap(); + } + + pub async fn assert(&self, upload_path: &str) { + let filepath = self.torrent_file_path(upload_path, self.test_data.torrent.torrent_id); + let torrent_file = read_torrent_from_file(&filepath).unwrap(); + + self.assert_torrent(&torrent_file).await; + // TODO + // `torrust_torrent_files`, + // `torrust_torrent_info` + // `torrust_torrent_announce_urls` + } + + pub fn torrent_file_path(&self, upload_path: &str, torrent_id: i64) -> String { + format!("{}/{}.torrent", &upload_path, &torrent_id) + } + + /// Table `torrust_torrents` + async fn assert_torrent(&self, torrent_file: &Torrent) { + let imported_torrent = self + .destiny_database + .get_torrent(self.test_data.torrent.torrent_id) + .await + .unwrap(); + + assert_eq!( + imported_torrent.torrent_id, + self.test_data.torrent.torrent_id + ); + assert_eq!(imported_torrent.uploader_id, self.test_data.user.user_id); + assert_eq!( + imported_torrent.category_id, + self.test_data.torrent.category_id + ); + assert_eq!(imported_torrent.info_hash, self.test_data.torrent.info_hash); + assert_eq!(imported_torrent.size, self.test_data.torrent.file_size); + assert_eq!(imported_torrent.name, torrent_file.info.name); + assert_eq!( + imported_torrent.pieces, + torrent_file.info.get_pieces_as_string() + ); + assert_eq!( + imported_torrent.piece_length, + torrent_file.info.piece_length + ); + if torrent_file.info.private.is_none() { + assert_eq!(imported_torrent.private, Some(0)); + } else { + assert_eq!(imported_torrent.private, torrent_file.info.private); + } + assert_eq!( + imported_torrent.root_hash, + torrent_file.info.get_root_hash_as_i64() + ); + assert_eq!( + imported_torrent.date_uploaded, + convert_timestamp_to_datetime(self.test_data.torrent.upload_date) + ); + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index d0314328..22093624 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -13,6 +13,7 @@ //! to see the "upgrader" command output. 
use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::torrent_tester::TorrentTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::tracker_keys_tester::TrackerKeysTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_data_tester::UserDataTester; use std::fs; @@ -26,6 +27,7 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // Directories let fixtures_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/".to_string(); let output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string(); + let upload_path = format!("{}uploads/", &fixtures_dir); // Files let source_database_file = format!("{}source.db", output_dir); @@ -44,63 +46,40 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // Load data into database v1 - // `torrust_users`, `torrust_user_profiles` and `torrust_user_authentication` tables - let user_data_tester = UserDataTester::new( source_database.clone(), destiny_database.clone(), &execution_time, ); - user_data_tester.load_data_into_source_db().await; - // `torrust_tracker_keys` table - let tracker_keys_tester = TrackerKeysTester::new( source_database.clone(), destiny_database.clone(), user_data_tester.test_data.user.user_id, ); - tracker_keys_tester.load_data_into_source_db().await; - // `torrust_torrents` table - - // TODO + let torrent_tester = TorrentTester::new( + source_database.clone(), + destiny_database.clone(), + &user_data_tester.test_data.user, + ); + torrent_tester.load_data_into_source_db().await; // Run the upgrader let args = Arguments { source_database_file: source_database_file.clone(), destiny_database_file: destiny_database_file.clone(), - upload_path: format!("{}uploads/", fixtures_dir), + upload_path: upload_path.clone(), }; upgrade(&args, &execution_time).await; // Assertions in database v2 - // `torrust_users`, `torrust_user_profiles` and `torrust_user_authentication` tables - user_data_tester.assert().await; - - // `torrust_tracker_keys` table - tracker_keys_tester.assert().await; - - // `torrust_torrents` table - - // TODO - - // `torrust_torrent_files` table - - // TODO - - // `torrust_torrent_info` table - - // TODO - - // `torrust_torrent_announce_urls` table - - // TODO + torrent_tester.assert(&upload_path).await; } async fn source_db_connection(source_database_file: &str) -> Arc { @@ -113,7 +92,6 @@ async fn destiny_db_connection(destiny_database_file: &str) -> Arc Date: Thu, 10 Nov 2022 17:50:03 +0000 Subject: [PATCH 38/53] refactor: [#56] rename mod and variables --- .../from_v1_0_0_to_v2_0_0/testers/mod.rs | 4 ++-- ...r_keys_tester.rs => tracker_key_tester.rs} | 4 ++-- .../{user_data_tester.rs => user_tester.rs} | 4 ++-- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 20 +++++++++---------- 4 files changed, 16 insertions(+), 16 deletions(-) rename tests/upgrades/from_v1_0_0_to_v2_0_0/testers/{tracker_keys_tester.rs => tracker_key_tester.rs} (97%) rename tests/upgrades/from_v1_0_0_to_v2_0_0/testers/{user_data_tester.rs => user_tester.rs} (98%) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs index 6445ec5b..730b5149 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs @@ -1,3 +1,3 @@ pub mod torrent_tester; -pub mod tracker_keys_tester; -pub mod user_data_tester; +pub mod tracker_key_tester; +pub mod 
user_tester;
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_keys_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs
similarity index 97%
rename from tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_keys_tester.rs
rename to tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs
index dd6eefdb..68b591a8 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_keys_tester.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs
@@ -3,7 +3,7 @@ use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
 use std::sync::Arc;
 use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::TrackerKeyRecordV1;
 
-pub struct TrackerKeysTester {
+pub struct TrackerKeyTester {
     source_database: Arc<SqliteDatabaseV1_0_0>,
     destiny_database: Arc<SqliteDatabaseV2_0_0>,
     test_data: TestData,
@@ -13,7 +13,7 @@ pub struct TestData {
     pub tracker_key: TrackerKeyRecordV1,
 }
 
-impl TrackerKeysTester {
+impl TrackerKeyTester {
     pub fn new(
         source_database: Arc<SqliteDatabaseV1_0_0>,
         destiny_database: Arc<SqliteDatabaseV2_0_0>,
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs
similarity index 98%
rename from tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs
rename to tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs
index 1f6f7238..e0d001f8 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_data_tester.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs
@@ -6,7 +6,7 @@ use rand_core::OsRng;
 use std::sync::Arc;
 use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1;
 
-pub struct UserDataTester {
+pub struct UserTester {
     source_database: Arc<SqliteDatabaseV1_0_0>,
     destiny_database: Arc<SqliteDatabaseV2_0_0>,
     execution_time: String,
@@ -17,7 +17,7 @@ pub struct TestData {
     pub user: UserRecordV1,
 }
 
-impl UserDataTester {
+impl UserTester {
     pub fn new(
         source_database: Arc<SqliteDatabaseV1_0_0>,
         destiny_database: Arc<SqliteDatabaseV2_0_0>,
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
index 22093624..ccda3537 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs
@@ -14,8 +14,8 @@
 use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
 use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
 use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::torrent_tester::TorrentTester;
-use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::tracker_keys_tester::TrackerKeysTester;
-use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_data_tester::UserDataTester;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::tracker_key_tester::TrackerKeyTester;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_tester::UserTester;
 use std::fs;
 use std::sync::Arc;
 use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{
@@ -46,24 +46,24 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() {
 
     // Load data into database v1
 
-    let user_data_tester = UserDataTester::new(
+    let user_tester = UserTester::new(
         source_database.clone(),
         destiny_database.clone(),
         &execution_time,
     );
-    user_data_tester.load_data_into_source_db().await;
+    user_tester.load_data_into_source_db().await;
 
-    let tracker_keys_tester = TrackerKeysTester::new(
+    let tracker_key_tester = TrackerKeyTester::new(
         source_database.clone(),
         destiny_database.clone(),
-        user_data_tester.test_data.user.user_id,
+        user_tester.test_data.user.user_id,
); - tracker_keys_tester.load_data_into_source_db().await; + tracker_key_tester.load_data_into_source_db().await; let torrent_tester = TorrentTester::new( source_database.clone(), destiny_database.clone(), - &user_data_tester.test_data.user, + &user_tester.test_data.user, ); torrent_tester.load_data_into_source_db().await; @@ -77,8 +77,8 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // Assertions in database v2 - user_data_tester.assert().await; - tracker_keys_tester.assert().await; + user_tester.assert().await; + tracker_key_tester.assert().await; torrent_tester.assert(&upload_path).await; } From 00632890f35f26f8f4547850fa5a42708d5f39e0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Nov 2022 22:19:06 +0000 Subject: [PATCH 39/53] tests: [#56] for torrents info and announce urls tables in upgrader --- .../databases/sqlite_v1_0_0.rs | 2 +- .../from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs | 38 ++++++++++++++++ .../testers/torrent_tester.rs | 43 +++++++++++++++++-- 3 files changed, 79 insertions(+), 4 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs index 3d42a4b3..a5743000 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -35,7 +35,7 @@ pub struct TorrentRecordV1 { pub info_hash: String, pub title: String, pub category_id: i64, - pub description: String, + pub description: Option, pub upload_date: i64, pub file_size: i64, pub seeders: i64, diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs index 17331572..2f0ba395 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs @@ -35,6 +35,20 @@ pub struct TrackerKeyRecordV2 { pub date_expiry: i64, } +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct TorrentInfoRecordV2 { + pub torrent_id: i64, + pub title: String, + pub description: Option, +} + +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, PartialEq)] +pub struct TorrentAnnounceUrlV2 { + pub announce_url_id: i64, + pub torrent_id: i64, + pub tracker_url: String, +} + pub struct SqliteDatabaseV2_0_0 { pub pool: SqlitePool, } @@ -95,4 +109,28 @@ impl SqliteDatabaseV2_0_0 { .fetch_one(&self.pool) .await } + + pub async fn get_torrent_info( + &self, + torrent_id: i64, + ) -> Result { + query_as::<_, TorrentInfoRecordV2>( + "SELECT * FROM torrust_torrent_info WHERE torrent_id = ?", + ) + .bind(torrent_id) + .fetch_one(&self.pool) + .await + } + + pub async fn get_torrent_announce_urls( + &self, + torrent_id: i64, + ) -> Result, sqlx::Error> { + query_as::<_, TorrentAnnounceUrlV2>( + "SELECT * FROM torrust_torrent_announce_urls WHERE torrent_id = ?", + ) + .bind(torrent_id) + .fetch_all(&self.pool) + .await + } } diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs index d6c14045..33ea8b1a 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs @@ -31,7 +31,7 @@ impl TorrentTester { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), title: "title".to_string(), category_id: 1, - description: "description".to_string(), + description: Some("description".to_string()), upload_date: 1667546358, // 2022-11-04 07:19:18 file_size: 9219566, 
seeders: 0, @@ -60,10 +60,10 @@ impl TorrentTester { let torrent_file = read_torrent_from_file(&filepath).unwrap(); self.assert_torrent(&torrent_file).await; + self.assert_torrent_info().await; + self.assert_torrent_announce_urls(&torrent_file).await; // TODO // `torrust_torrent_files`, - // `torrust_torrent_info` - // `torrust_torrent_announce_urls` } pub fn torrent_file_path(&self, upload_path: &str, torrent_id: i64) -> String { @@ -112,4 +112,41 @@ impl TorrentTester { convert_timestamp_to_datetime(self.test_data.torrent.upload_date) ); } + + /// Table `torrust_torrent_info` + async fn assert_torrent_info(&self) { + let torrent_info = self + .destiny_database + .get_torrent_info(self.test_data.torrent.torrent_id) + .await + .unwrap(); + + assert_eq!(torrent_info.torrent_id, self.test_data.torrent.torrent_id); + assert_eq!(torrent_info.title, self.test_data.torrent.title); + assert_eq!(torrent_info.description, self.test_data.torrent.description); + } + + /// Table `torrust_torrent_announce_urls` + async fn assert_torrent_announce_urls(&self, torrent_file: &Torrent) { + let torrent_announce_urls = self + .destiny_database + .get_torrent_announce_urls(self.test_data.torrent.torrent_id) + .await + .unwrap(); + + let urls: Vec = torrent_announce_urls + .iter() + .map(|torrent_announce_url| torrent_announce_url.tracker_url.to_string()) + .collect(); + + let expected_urls = torrent_file + .announce_list + .clone() + .unwrap() + .into_iter() + .flatten() + .collect::>(); + + assert_eq!(urls, expected_urls); + } } From 750969dce57729f13d60e10db8e550c9ef03b627 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Nov 2022 22:25:15 +0000 Subject: [PATCH 40/53] refactor: [#56] rename methods --- .../testers/torrent_tester.rs | 2 +- .../testers/tracker_key_tester.rs | 2 +- .../testers/user_tester.rs | 2 +- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 25 +++++++++++-------- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs index 33ea8b1a..3f636506 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs @@ -55,7 +55,7 @@ impl TorrentTester { .unwrap(); } - pub async fn assert(&self, upload_path: &str) { + pub async fn assert_data_in_destiny_db(&self, upload_path: &str) { let filepath = self.torrent_file_path(upload_path, self.test_data.torrent.torrent_id); let torrent_file = read_torrent_from_file(&filepath).unwrap(); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs index 68b591a8..3dfa4904 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs @@ -41,7 +41,7 @@ impl TrackerKeyTester { } /// Table `torrust_tracker_keys` - pub async fn assert(&self) { + pub async fn assert_data_in_destiny_db(&self) { let imported_key = self .destiny_database .get_tracker_key(self.test_data.tracker_key.key_id) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs index e0d001f8..d349a47f 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs @@ -47,7 +47,7 @@ impl UserTester { .unwrap(); } - pub async fn assert(&self) { + pub async fn 
assert_data_in_destiny_db(&self) { self.assert_user().await; self.assert_user_profile().await; self.assert_user_authentication().await; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index ccda3537..8f3c33ca 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -44,7 +44,7 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // The datetime when the upgrader is executed let execution_time = datetime_iso_8601(); - // Load data into database v1 + // Load data into source database in version v1.0.0 let user_tester = UserTester::new( source_database.clone(), @@ -68,18 +68,21 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { torrent_tester.load_data_into_source_db().await; // Run the upgrader - let args = Arguments { - source_database_file: source_database_file.clone(), - destiny_database_file: destiny_database_file.clone(), - upload_path: upload_path.clone(), - }; - upgrade(&args, &execution_time).await; + upgrade( + &Arguments { + source_database_file: source_database_file.clone(), + destiny_database_file: destiny_database_file.clone(), + upload_path: upload_path.clone(), + }, + &execution_time, + ) + .await; - // Assertions in database v2 + // Assertions for data transferred to the new database in version v2.0.0 - user_tester.assert().await; - tracker_key_tester.assert().await; - torrent_tester.assert(&upload_path).await; + user_tester.assert_data_in_destiny_db().await; + tracker_key_tester.assert_data_in_destiny_db().await; + torrent_tester.assert_data_in_destiny_db(&upload_path).await; } async fn source_db_connection(source_database_file: &str) -> Arc { From 82b84a3633e287829c2b290b7cc1857002a3c6e2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Nov 2022 22:47:25 +0000 Subject: [PATCH 41/53] refactor: [#56] extract test configuration --- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 89 ++++++++++++------- 1 file changed, 55 insertions(+), 34 deletions(-) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 8f3c33ca..a40f0a37 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -22,67 +22,88 @@ use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{ datetime_iso_8601, upgrade, Arguments, }; -#[tokio::test] -async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { +struct TestConfig { // Directories - let fixtures_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/".to_string(); - let output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string(); - let upload_path = format!("{}uploads/", &fixtures_dir); - + pub fixtures_dir: String, + pub upload_path: String, // Files - let source_database_file = format!("{}source.db", output_dir); - let destiny_database_file = format!("{}destiny.db", output_dir); + pub source_database_file: String, + pub destiny_database_file: String, +} - // Set up clean source database - reset_databases(&source_database_file, &destiny_database_file); - let source_database = source_db_connection(&source_database_file).await; - source_database.migrate(&fixtures_dir).await; +impl Default for TestConfig { + fn default() -> Self { + let fixtures_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/".to_string(); + let upload_path = format!("{}uploads/", &fixtures_dir); + let output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string(); + let 
source_database_file = format!("{}source.db", output_dir); + let destiny_database_file = format!("{}destiny.db", output_dir); + Self { + fixtures_dir, + upload_path, + source_database_file, + destiny_database_file, + } + } +} - // Set up connection for the destiny database - let destiny_database = destiny_db_connection(&destiny_database_file).await; +#[tokio::test] +async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { + let config = TestConfig::default(); + + let (source_db, dest_db) = setup_databases(&config).await; // The datetime when the upgrader is executed let execution_time = datetime_iso_8601(); - // Load data into source database in version v1.0.0 - - let user_tester = UserTester::new( - source_database.clone(), - destiny_database.clone(), - &execution_time, - ); - user_tester.load_data_into_source_db().await; - + let user_tester = UserTester::new(source_db.clone(), dest_db.clone(), &execution_time); let tracker_key_tester = TrackerKeyTester::new( - source_database.clone(), - destiny_database.clone(), + source_db.clone(), + dest_db.clone(), user_tester.test_data.user.user_id, ); - tracker_key_tester.load_data_into_source_db().await; - let torrent_tester = TorrentTester::new( - source_database.clone(), - destiny_database.clone(), + source_db.clone(), + dest_db.clone(), &user_tester.test_data.user, ); + + // Load data into source database in version v1.0.0 + user_tester.load_data_into_source_db().await; + tracker_key_tester.load_data_into_source_db().await; torrent_tester.load_data_into_source_db().await; // Run the upgrader upgrade( &Arguments { - source_database_file: source_database_file.clone(), - destiny_database_file: destiny_database_file.clone(), - upload_path: upload_path.clone(), + source_database_file: config.source_database_file.clone(), + destiny_database_file: config.destiny_database_file.clone(), + upload_path: config.upload_path.clone(), }, &execution_time, ) .await; // Assertions for data transferred to the new database in version v2.0.0 - user_tester.assert_data_in_destiny_db().await; tracker_key_tester.assert_data_in_destiny_db().await; - torrent_tester.assert_data_in_destiny_db(&upload_path).await; + torrent_tester + .assert_data_in_destiny_db(&config.upload_path) + .await; +} + +async fn setup_databases( + config: &TestConfig, +) -> (Arc, Arc) { + // Set up clean source database + reset_databases(&config.source_database_file, &config.destiny_database_file); + let source_database = source_db_connection(&config.source_database_file).await; + source_database.migrate(&config.fixtures_dir).await; + + // Set up connection for the destiny database + let destiny_database = destiny_db_connection(&config.destiny_database_file).await; + + (source_database, destiny_database) } async fn source_db_connection(source_database_file: &str) -> Arc { From afffaefc62f21070ee9a9aa4e3f98367142dad39 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Nov 2022 10:15:16 +0000 Subject: [PATCH 42/53] tests: [#56] for torrents files table in upgrader --- src/models/torrent_file.rs | 25 ++++++++++++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 5 +-- .../from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs | 19 +++++++++++ .../testers/torrent_tester.rs | 33 ++++++++++++++----- 4 files changed, 69 insertions(+), 13 deletions(-) diff --git a/src/models/torrent_file.rs b/src/models/torrent_file.rs index 62319036..6e015d1a 100644 --- a/src/models/torrent_file.rs +++ b/src/models/torrent_file.rs @@ -55,6 +55,14 @@ impl TorrentInfo { Some(root_hash) => root_hash.parse::().unwrap(), } } + + pub fn 
is_a_single_file_torrent(&self) -> bool { + self.length.is_some() + } + + pub fn is_a_multiple_file_torrent(&self) -> bool { + self.files.is_some() + } } #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -192,6 +200,23 @@ impl Torrent { } } } + + pub fn announce_urls(&self) -> Vec { + self.announce_list + .clone() + .unwrap() + .into_iter() + .flatten() + .collect::>() + } + + pub fn is_a_single_file_torrent(&self) -> bool { + self.info.is_a_single_file_torrent() + } + + pub fn is_a_multiple_file_torrent(&self) -> bool { + self.info.is_a_multiple_file_torrent() + } } #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize, sqlx::FromRow)] diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index cfb17be9..91e42931 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -359,10 +359,7 @@ async fn transfer_torrents( println!("[v2][torrust_torrent_files] adding torrent files"); - let _is_torrent_with_multiple_files = torrent_from_file.info.files.is_some(); - let is_torrent_with_a_single_file = torrent_from_file.info.length.is_some(); - - if is_torrent_with_a_single_file { + if torrent_from_file.is_a_single_file_torrent() { // The torrent contains only one file then: // - "path" is NULL // - "md5sum" can be NULL diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs index 2f0ba395..20a55daa 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs @@ -49,6 +49,15 @@ pub struct TorrentAnnounceUrlV2 { pub tracker_url: String, } +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow, PartialEq)] +pub struct TorrentFileV2 { + pub file_id: i64, + pub torrent_id: i64, + pub md5sum: Option, + pub length: i64, + pub path: Option, +} + pub struct SqliteDatabaseV2_0_0 { pub pool: SqlitePool, } @@ -133,4 +142,14 @@ impl SqliteDatabaseV2_0_0 { .fetch_all(&self.pool) .await } + + pub async fn get_torrent_files( + &self, + torrent_id: i64, + ) -> Result, sqlx::Error> { + query_as::<_, TorrentFileV2>("SELECT * FROM torrust_torrent_files WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_all(&self.pool) + .await + } } diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs index 3f636506..2b6d92b4 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs @@ -62,8 +62,7 @@ impl TorrentTester { self.assert_torrent(&torrent_file).await; self.assert_torrent_info().await; self.assert_torrent_announce_urls(&torrent_file).await; - // TODO - // `torrust_torrent_files`, + self.assert_torrent_files(&torrent_file).await; } pub fn torrent_file_path(&self, upload_path: &str, torrent_id: i64) -> String { @@ -139,14 +138,30 @@ impl TorrentTester { .map(|torrent_announce_url| torrent_announce_url.tracker_url.to_string()) .collect(); - let expected_urls = torrent_file - .announce_list - .clone() - .unwrap() - .into_iter() - .flatten() - .collect::>(); + let expected_urls = torrent_file.announce_urls(); assert_eq!(urls, expected_urls); } + + /// Table `torrust_torrent_files` + async fn assert_torrent_files(&self, torrent_file: &Torrent) { + let db_torrent_files = self + .destiny_database + .get_torrent_files(self.test_data.torrent.torrent_id) + .await + .unwrap(); + + if 
torrent_file.is_a_single_file_torrent() { + let db_torrent_file = &db_torrent_files[0]; + assert_eq!( + db_torrent_file.torrent_id, + self.test_data.torrent.torrent_id + ); + assert!(db_torrent_file.md5sum.is_none()); + assert_eq!(db_torrent_file.length, torrent_file.info.length.unwrap()); + assert!(db_torrent_file.path.is_none()); + } else { + todo!(); + } + } } From ee01e7b475605f767324c94287f91bc647642d5e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Nov 2022 10:44:27 +0000 Subject: [PATCH 43/53] test: [#56] for torrent files table in upgrader (new case) --- src/models/torrent_file.rs | 4 + .../fixtures/uploads/2.torrent | Bin 0 -> 1505 bytes .../testers/torrent_tester.rs | 141 +++++++++++++----- 3 files changed, 105 insertions(+), 40 deletions(-) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent diff --git a/src/models/torrent_file.rs b/src/models/torrent_file.rs index 6e015d1a..ff34be5e 100644 --- a/src/models/torrent_file.rs +++ b/src/models/torrent_file.rs @@ -202,6 +202,10 @@ impl Torrent { } pub fn announce_urls(&self) -> Vec { + if self.announce_list.is_none() { + return vec![self.announce.clone().unwrap()]; + } + self.announce_list .clone() .unwrap() diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent b/tests/upgrades/from_v1_0_0_to_v2_0_0/fixtures/uploads/2.torrent new file mode 100644 index 0000000000000000000000000000000000000000..a62afbff926204bae8fca24d76914ceae1a38b1c GIT binary patch literal 1505 zcmYeXuu9C!%P-AKPBk&GDorV{($_C3N=(j9Ez$+@^wN_uOG@-I^V0IIEDZEvO3kg3 z^K)}k^Gb{@twPH46?_x(Qc`o0itTQrj0{XHDnLX*g+f7Qa!F}XYO$e#RdP{k zVo7R>LQ3asU}t+ zC!|%&NbE__SYU?e{e3QwMd}$DvxSjYAQ}k zjHqZyswvo8#W@%*HnA#5EXl~h=?bt{fni}{l~__z1oR%<%N7Pkpil#AH?h(yNCc_@ zi=~om3D~Qtsi}rWR=KGqi3(+@Ma954$}|F+pO=^m3Zs(pd|gCj=oY7z=)&XO2omTD zu(&WYwlGOGgT{mjFwxacEaaW*yQ^b%h(gF!wW8jrHv5N)_sfre-ucs{JXUo_$AHb)=j9QWCM^W9eQ@b+Dt{{LM@cf85s_Una5 zm;LP1TV`0Z_ruRe%0JIEtPf#e`PrH&Q{H*7%n!S?c=t+=1yk>xi|gM z_FUS!?N3>W%Vcw_f}+eaa7G0u4shO5$VsdOrjHaNa+k5Gk=1h79!A@n@)jG{_$DT7 z_rDPl8!gG)S9Jfkn6tg{XGf6}pVinU`(|9Uf7qJ5x8_fqx!LV?|C0qTPFCdIb>xuf z5z){)FD|*ocmHrw5Asa9mRWu?)aCY;V_!p_>$H7nSLfK?x=h^V@dc>>y@QPw618ke z&ppJ}wgDsU%??e)w|Z5(SIjppk#1VOZ-(Etru>aYTXpy>3Zyf%J#Tzjcy;U_yj)`BE zaI}8y|C0STk6oO7GP~|x?}vsEULyFRQ>)GDz3=CG+*l%36{?xg%dL%S0mWt=}(, user: &UserRecordV1, ) -> Self { - let torrent = TorrentRecordV1 { + let torrent_01 = TorrentRecordV1 { torrent_id: 1, uploader: user.username.clone(), info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), - title: "title".to_string(), + title: "A Mandelbrot Set 2048x2048px picture".to_string(), category_id: 1, - description: Some("description".to_string()), + description: Some( + "A beautiful Mandelbrot Set picture in black and white. \ + - Hybrid torrent V1 and V2. \ + - Single-file torrent. \ + - Public. \ + - More than one tracker URL. \ + " + .to_string(), + ), + upload_date: 1667546358, // 2022-11-04 07:19:18 + file_size: 9219566, + seeders: 0, + leechers: 0, + }; + let torrent_02 = TorrentRecordV1 { + torrent_id: 2, + uploader: user.username.clone(), + info_hash: "0902d375f18ec020f0cc68ed4810023032ba81cb".to_string(), + title: "Two Mandelbrot Set 2048x2048px pictures".to_string(), + category_id: 1, + description: Some( + "Two beautiful Mandelbrot Set pictures in black and white. \ + - Hybrid torrent V1 and V2. \ + - Multiple-files torrent. \ + - Private. + - Only one tracker URL. 
+ " + .to_string(), + ), upload_date: 1667546358, // 2022-11-04 07:19:18 file_size: 9219566, seeders: 0, @@ -42,7 +71,8 @@ impl TorrentTester { source_database, destiny_database, test_data: TestData { - torrent, + torrent_01, + torrent_02, user: user.clone(), }, } @@ -50,19 +80,39 @@ impl TorrentTester { pub async fn load_data_into_source_db(&self) { self.source_database - .insert_torrent(&self.test_data.torrent) + .insert_torrent(&self.test_data.torrent_01) + .await + .unwrap(); + self.source_database + .insert_torrent(&self.test_data.torrent_02) .await .unwrap(); } pub async fn assert_data_in_destiny_db(&self, upload_path: &str) { - let filepath = self.torrent_file_path(upload_path, self.test_data.torrent.torrent_id); - let torrent_file = read_torrent_from_file(&filepath).unwrap(); - - self.assert_torrent(&torrent_file).await; - self.assert_torrent_info().await; - self.assert_torrent_announce_urls(&torrent_file).await; - self.assert_torrent_files(&torrent_file).await; + let filepath_01 = self.torrent_file_path(upload_path, self.test_data.torrent_01.torrent_id); + let filepath_02 = self.torrent_file_path(upload_path, self.test_data.torrent_02.torrent_id); + + let torrent_file_01 = read_torrent_from_file(&filepath_01).unwrap(); + let torrent_file_02 = read_torrent_from_file(&filepath_02).unwrap(); + + // Check torrent 01 + self.assert_torrent(&self.test_data.torrent_01, &torrent_file_01) + .await; + self.assert_torrent_info(&self.test_data.torrent_01).await; + self.assert_torrent_announce_urls(&self.test_data.torrent_01, &torrent_file_01) + .await; + self.assert_torrent_files(&self.test_data.torrent_01, &torrent_file_01) + .await; + + // Check torrent 02 + self.assert_torrent(&self.test_data.torrent_02, &torrent_file_02) + .await; + self.assert_torrent_info(&self.test_data.torrent_02).await; + self.assert_torrent_announce_urls(&self.test_data.torrent_02, &torrent_file_02) + .await; + self.assert_torrent_files(&self.test_data.torrent_02, &torrent_file_02) + .await; } pub fn torrent_file_path(&self, upload_path: &str, torrent_id: i64) -> String { @@ -70,24 +120,18 @@ impl TorrentTester { } /// Table `torrust_torrents` - async fn assert_torrent(&self, torrent_file: &Torrent) { + async fn assert_torrent(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { let imported_torrent = self .destiny_database - .get_torrent(self.test_data.torrent.torrent_id) + .get_torrent(torrent.torrent_id) .await .unwrap(); - assert_eq!( - imported_torrent.torrent_id, - self.test_data.torrent.torrent_id - ); + assert_eq!(imported_torrent.torrent_id, torrent.torrent_id); assert_eq!(imported_torrent.uploader_id, self.test_data.user.user_id); - assert_eq!( - imported_torrent.category_id, - self.test_data.torrent.category_id - ); - assert_eq!(imported_torrent.info_hash, self.test_data.torrent.info_hash); - assert_eq!(imported_torrent.size, self.test_data.torrent.file_size); + assert_eq!(imported_torrent.category_id, torrent.category_id); + assert_eq!(imported_torrent.info_hash, torrent.info_hash); + assert_eq!(imported_torrent.size, torrent.file_size); assert_eq!(imported_torrent.name, torrent_file.info.name); assert_eq!( imported_torrent.pieces, @@ -108,28 +152,32 @@ impl TorrentTester { ); assert_eq!( imported_torrent.date_uploaded, - convert_timestamp_to_datetime(self.test_data.torrent.upload_date) + convert_timestamp_to_datetime(torrent.upload_date) ); } /// Table `torrust_torrent_info` - async fn assert_torrent_info(&self) { + async fn assert_torrent_info(&self, torrent: &TorrentRecordV1) { let torrent_info 
= self .destiny_database - .get_torrent_info(self.test_data.torrent.torrent_id) + .get_torrent_info(torrent.torrent_id) .await .unwrap(); - assert_eq!(torrent_info.torrent_id, self.test_data.torrent.torrent_id); - assert_eq!(torrent_info.title, self.test_data.torrent.title); - assert_eq!(torrent_info.description, self.test_data.torrent.description); + assert_eq!(torrent_info.torrent_id, torrent.torrent_id); + assert_eq!(torrent_info.title, torrent.title); + assert_eq!(torrent_info.description, torrent.description); } /// Table `torrust_torrent_announce_urls` - async fn assert_torrent_announce_urls(&self, torrent_file: &Torrent) { + async fn assert_torrent_announce_urls( + &self, + torrent: &TorrentRecordV1, + torrent_file: &Torrent, + ) { let torrent_announce_urls = self .destiny_database - .get_torrent_announce_urls(self.test_data.torrent.torrent_id) + .get_torrent_announce_urls(torrent.torrent_id) .await .unwrap(); @@ -144,24 +192,37 @@ impl TorrentTester { } /// Table `torrust_torrent_files` - async fn assert_torrent_files(&self, torrent_file: &Torrent) { + async fn assert_torrent_files(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { let db_torrent_files = self .destiny_database - .get_torrent_files(self.test_data.torrent.torrent_id) + .get_torrent_files(torrent.torrent_id) .await .unwrap(); if torrent_file.is_a_single_file_torrent() { let db_torrent_file = &db_torrent_files[0]; - assert_eq!( - db_torrent_file.torrent_id, - self.test_data.torrent.torrent_id - ); + assert_eq!(db_torrent_file.torrent_id, torrent.torrent_id); assert!(db_torrent_file.md5sum.is_none()); assert_eq!(db_torrent_file.length, torrent_file.info.length.unwrap()); assert!(db_torrent_file.path.is_none()); } else { - todo!(); + let files = torrent_file.info.files.as_ref().unwrap(); + + // Files in torrent file + for file in files.iter() { + let file_path = file.path.join("/"); + + // Find file in database + let db_torrent_file = db_torrent_files + .iter() + .find(|&f| f.path == Some(file_path.clone())) + .unwrap(); + + assert_eq!(db_torrent_file.torrent_id, torrent.torrent_id); + assert!(db_torrent_file.md5sum.is_none()); + assert_eq!(db_torrent_file.length, file.length); + assert_eq!(db_torrent_file.path, Some(file_path)); + } } } } From e23d94885f7fa22197dfa3ad458413572c087e53 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Nov 2022 17:42:19 +0000 Subject: [PATCH 44/53] refactor: remove duplication in tests --- .../testers/torrent_tester.rs | 51 ++++++------------- 1 file changed, 16 insertions(+), 35 deletions(-) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs index 28e20e18..d7ec1e39 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs @@ -15,8 +15,7 @@ pub struct TorrentTester { } pub struct TestData { - pub torrent_01: TorrentRecordV1, - pub torrent_02: TorrentRecordV1, + pub torrents: Vec, pub user: UserRecordV1, } @@ -71,48 +70,30 @@ impl TorrentTester { source_database, destiny_database, test_data: TestData { - torrent_01, - torrent_02, + torrents: vec![torrent_01, torrent_02], user: user.clone(), }, } } pub async fn load_data_into_source_db(&self) { - self.source_database - .insert_torrent(&self.test_data.torrent_01) - .await - .unwrap(); - self.source_database - .insert_torrent(&self.test_data.torrent_02) - .await - .unwrap(); + for torrent in &self.test_data.torrents { + 
self.source_database.insert_torrent(&torrent).await.unwrap(); + } } pub async fn assert_data_in_destiny_db(&self, upload_path: &str) { - let filepath_01 = self.torrent_file_path(upload_path, self.test_data.torrent_01.torrent_id); - let filepath_02 = self.torrent_file_path(upload_path, self.test_data.torrent_02.torrent_id); - - let torrent_file_01 = read_torrent_from_file(&filepath_01).unwrap(); - let torrent_file_02 = read_torrent_from_file(&filepath_02).unwrap(); - - // Check torrent 01 - self.assert_torrent(&self.test_data.torrent_01, &torrent_file_01) - .await; - self.assert_torrent_info(&self.test_data.torrent_01).await; - self.assert_torrent_announce_urls(&self.test_data.torrent_01, &torrent_file_01) - .await; - self.assert_torrent_files(&self.test_data.torrent_01, &torrent_file_01) - .await; - - // Check torrent 02 - self.assert_torrent(&self.test_data.torrent_02, &torrent_file_02) - .await; - self.assert_torrent_info(&self.test_data.torrent_02).await; - self.assert_torrent_announce_urls(&self.test_data.torrent_02, &torrent_file_02) - .await; - self.assert_torrent_files(&self.test_data.torrent_02, &torrent_file_02) - .await; + for torrent in &self.test_data.torrents { + let filepath = self.torrent_file_path(upload_path, torrent.torrent_id); + + let torrent_file = read_torrent_from_file(&filepath).unwrap(); + + self.assert_torrent(&torrent, &torrent_file).await; + self.assert_torrent_info(&torrent).await; + self.assert_torrent_announce_urls(&torrent, &torrent_file) + .await; + self.assert_torrent_files(&torrent, &torrent_file).await; + } } pub fn torrent_file_path(&self, upload_path: &str, torrent_id: i64) -> String { From e1790f6991d693aaf4fe72d1c13cd9c10cd488d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Nov 2022 18:28:31 +0000 Subject: [PATCH 45/53] refactor: [#56] extract mods in upgrader --- .../from_v1_0_0_to_v2_0_0/databases/mod.rs | 27 ++ src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs | 3 +- .../transferrers/category_transferrer.rs | 39 ++ .../from_v1_0_0_to_v2_0_0/transferrers/mod.rs | 4 + .../transferrers/torrent_transferrer.rs | 198 +++++++++ .../transferrers/tracker_key_transferrer.rs | 45 ++ .../transferrers/user_transferrer.rs | 80 ++++ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 392 +----------------- .../testers/torrent_tester.rs | 2 +- 9 files changed, 408 insertions(+), 382 deletions(-) create mode 100644 src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs create mode 100644 src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs create mode 100644 src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs create mode 100644 src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs create mode 100644 src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs index fa37d81b..0cc2e300 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs @@ -1,2 +1,29 @@ +use self::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use self::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use std::sync::Arc; + pub mod sqlite_v1_0_0; pub mod sqlite_v2_0_0; + +pub async fn current_db(db_filename: &str) -> Arc { + let source_database_connect_url = format!("sqlite://{}?mode=ro", db_filename); + Arc::new(SqliteDatabaseV1_0_0::new(&source_database_connect_url).await) +} + +pub async fn new_db(db_filename: &str) -> Arc { + let dest_database_connect_url = 
format!("sqlite://{}?mode=rwc", db_filename); + Arc::new(SqliteDatabaseV2_0_0::new(&dest_database_connect_url).await) +} + +pub async fn migrate_destiny_database(dest_database: Arc) { + println!("Running migrations in destiny database..."); + dest_database.migrate().await; +} + +pub async fn reset_destiny_database(dest_database: Arc) { + println!("Truncating all tables in destiny database ..."); + dest_database + .delete_all_database_rows() + .await + .expect("Can't reset destiny database."); +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs index ef4843d0..afb35f90 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -1,2 +1,3 @@ +pub mod databases; +pub mod transferrers; pub mod upgrader; -pub mod databases; \ No newline at end of file diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs new file mode 100644 index 00000000..b8e20515 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs @@ -0,0 +1,39 @@ +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use std::sync::Arc; + +pub async fn transfer_categories( + source_database: Arc, + dest_database: Arc, +) { + println!("Transferring categories ..."); + + let source_categories = source_database.get_categories_order_by_id().await.unwrap(); + println!("[v1] categories: {:?}", &source_categories); + + let result = dest_database.reset_categories_sequence().await.unwrap(); + println!("[v2] reset categories sequence result {:?}", result); + + for cat in &source_categories { + println!( + "[v2] adding category {:?} with id {:?} ...", + &cat.name, &cat.category_id + ); + let id = dest_database + .insert_category_and_get_id(&cat.name) + .await + .unwrap(); + + if id != cat.category_id { + panic!( + "Error copying category {:?} from source DB to destiny DB", + &cat.category_id + ); + } + + println!("[v2] category: {:?} {:?} added.", id, &cat.name); + } + + let dest_categories = dest_database.get_categories().await.unwrap(); + println!("[v2] categories: {:?}", &dest_categories); +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs new file mode 100644 index 00000000..94eaac75 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/mod.rs @@ -0,0 +1,4 @@ +pub mod category_transferrer; +pub mod torrent_transferrer; +pub mod tracker_key_transferrer; +pub mod user_transferrer; diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs new file mode 100644 index 00000000..bcb096b0 --- /dev/null +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs @@ -0,0 +1,198 @@ +use crate::models::torrent_file::Torrent; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::TorrentRecordV2; +use crate::utils::parse_torrent::decode_torrent; +use std::sync::Arc; +use std::{error, fs}; + +pub async fn transfer_torrents( + source_database: Arc, + dest_database: Arc, + upload_path: &str, +) { 
+    println!("Transferring torrents ...");
+
+    // Transfer table `torrust_torrents_files`
+
+    // Although the table `torrust_torrents_files` existed in version v1.0.0,
+    // it was not used.
+
+    // Transfer table `torrust_torrents`
+
+    let torrents = source_database.get_torrents().await.unwrap();
+
+    for torrent in &torrents {
+        // [v2] table torrust_torrents
+
+        println!(
+            "[v2][torrust_torrents] adding the torrent: {:?} ...",
+            &torrent.torrent_id
+        );
+
+        let uploader = source_database
+            .get_user_by_username(&torrent.uploader)
+            .await
+            .unwrap();
+
+        if uploader.username != torrent.uploader {
+            panic!(
+                "Error copying torrent with id {:?}.
+                Username (`uploader`) in `torrust_torrents` table does not match `username` in `torrust_users` table",
+                &torrent.torrent_id
+            );
+        }
+
+        let filepath = format!("{}/{}.torrent", upload_path, &torrent.torrent_id);
+
+        let torrent_from_file_result = read_torrent_from_file(&filepath);
+
+        if torrent_from_file_result.is_err() {
+            panic!("Error: torrent file not found: {:?}", &filepath);
+        }
+
+        let torrent_from_file = torrent_from_file_result.unwrap();
+
+        let id = dest_database
+            .insert_torrent(&TorrentRecordV2::from_v1_data(
+                torrent,
+                &torrent_from_file.info,
+                &uploader,
+            ))
+            .await
+            .unwrap();
+
+        if id != torrent.torrent_id {
+            panic!(
+                "Error copying torrent {:?} from source DB to destiny DB",
+                &torrent.torrent_id
+            );
+        }
+
+        println!(
+            "[v2][torrust_torrents] torrent with id {:?} added.",
+            &torrent.torrent_id
+        );
+
+        // [v2] table torrust_torrent_files
+
+        println!("[v2][torrust_torrent_files] adding torrent files");
+
+        if torrent_from_file.is_a_single_file_torrent() {
+            // The torrent contains only one file, so:
+            // - "path" is NULL
+            // - "md5sum" can be NULL
+
+            println!(
+                "[v2][torrust_torrent_files][single-file-torrent] adding torrent file {:?} with length {:?} ...",
+                &torrent_from_file.info.name, &torrent_from_file.info.length,
+            );
+
+            let file_id = dest_database
+                .insert_torrent_file_for_torrent_with_one_file(
+                    torrent.torrent_id,
+                    // TODO: it seems md5sum can be None. Why? When?
+                    &torrent_from_file.info.md5sum.clone(),
+                    torrent_from_file.info.length.unwrap(),
+                )
+                .await;
+
+            println!(
+                "[v2][torrust_torrent_files][single-file-torrent] torrent file insert result: {:?}",
+                &file_id
+            );
+        } else {
+            // Multiple files are being shared
+            let files = torrent_from_file.info.files.as_ref().unwrap();
+
+            for file in files.iter() {
+                println!(
+                    "[v2][torrust_torrent_files][multiple-file-torrent] adding torrent file: {:?} ...",
+                    &file
+                );
+
+                let file_id = dest_database
+                    .insert_torrent_file_for_torrent_with_multiple_files(torrent, file)
+                    .await;
+
+                println!(
+                    "[v2][torrust_torrent_files][multiple-file-torrent] torrent file insert result: {:?}",
+                    &file_id
+                );
+            }
+        }
+
+        // [v2] table torrust_torrent_info
+
+        println!(
+            "[v2][torrust_torrent_info] adding the torrent info for torrent id {:?} ...",
+            &torrent.torrent_id
+        );
+
+        let id = dest_database.insert_torrent_info(torrent).await;
+
+        println!(
+            "[v2][torrust_torrent_info] torrent info insert result: {:?}.",
+            &id
+        );
+
+        // [v2] table torrust_torrent_announce_urls
+
+        println!(
+            "[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent id {:?} ...",
+            &torrent.torrent_id
+        );
+
+        if torrent_from_file.announce_list.is_some() {
+            // BEP-0012. Multiple trackers.
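+            // `announce-list` (BEP-0012) is a list of tiers, and each tier is
+            // a list of tracker URLs, e.g. [["udp://a:6969"], ["udp://b:6969"]].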
+
+            println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id);
+
+            // flatten the nested vec (this will however remove the tier grouping)
+            let announce_urls = torrent_from_file
+                .announce_list
+                .clone()
+                .unwrap()
+                .into_iter()
+                .flatten()
+                .collect::<Vec<String>>();
+
+            for tracker_url in announce_urls.iter() {
+                println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id);
+
+                let announce_url_id = dest_database
+                    .insert_torrent_announce_url(torrent.torrent_id, tracker_url)
+                    .await;
+
+                println!("[v2][torrust_torrent_announce_urls][announce-list] torrent announce url insert result {:?} ...", &announce_url_id);
+            }
+        } else if torrent_from_file.announce.is_some() {
+            println!("[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id);
+
+            let announce_url_id = dest_database
+                .insert_torrent_announce_url(
+                    torrent.torrent_id,
+                    &torrent_from_file.announce.unwrap(),
+                )
+                .await;
+
+            println!(
+                "[v2][torrust_torrent_announce_urls][announce] torrent announce url insert result {:?} ...",
+                &announce_url_id
+            );
+        }
+    }
+    println!("Torrents transferred");
+}
+
+pub fn read_torrent_from_file(path: &str) -> Result<Torrent, Box<dyn error::Error>> {
+    let contents = match fs::read(path) {
+        Ok(contents) => contents,
+        Err(e) => return Err(e.into()),
+    };
+
+    match decode_torrent(&contents) {
+        Ok(torrent) => Ok(torrent),
+        Err(e) => Err(e),
+    }
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs
new file mode 100644
index 00000000..e639739a
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs
@@ -0,0 +1,45 @@
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+use std::sync::Arc;
+
+pub async fn transfer_tracker_keys(
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    dest_database: Arc<SqliteDatabaseV2_0_0>,
+) {
+    println!("Transferring tracker keys ...");
+
+    // Transfer table `torrust_tracker_keys`
+
+    let tracker_keys = source_database.get_tracker_keys().await.unwrap();
+
+    for tracker_key in &tracker_keys {
+        // [v2] table torrust_tracker_keys
+
+        println!(
+            "[v2][torrust_tracker_keys] adding the tracker key with id {:?} ...",
+            &tracker_key.key_id
+        );
+
+        let id = dest_database
+            .insert_tracker_key(
+                tracker_key.key_id,
+                tracker_key.user_id,
+                &tracker_key.key,
+                tracker_key.valid_until,
+            )
+            .await
+            .unwrap();
+
+        if id != tracker_key.key_id {
+            panic!(
+                "Error copying tracker key {:?} from source DB to destiny DB",
+                &tracker_key.key_id
+            );
+        }
+
+        println!(
+            "[v2][torrust_tracker_keys] tracker key with id {:?} added.",
+            &tracker_key.key_id
+        );
+    }
+}
diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs
new file mode 100644
index 00000000..18d8d680
--- /dev/null
+++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs
@@ -0,0 +1,80 @@
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0;
+use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0;
+use std::sync::Arc;
+
+pub async fn transfer_users(
+    source_database: Arc<SqliteDatabaseV1_0_0>,
+    dest_database: Arc<SqliteDatabaseV2_0_0>,
+    date_imported: &str,
+) {
+
println!("Transferring users ..."); + + // Transfer table `torrust_users` + + let users = source_database.get_users().await.unwrap(); + + for user in &users { + // [v2] table torrust_users + + println!( + "[v2][torrust_users] adding user with username {:?} and id {:?} ...", + &user.username, &user.user_id + ); + + let id = dest_database + .insert_imported_user(user.user_id, date_imported, user.administrator) + .await + .unwrap(); + + if id != user.user_id { + panic!( + "Error copying user {:?} from source DB to destiny DB", + &user.user_id + ); + } + + println!( + "[v2][torrust_users] user: {:?} {:?} added.", + &user.user_id, &user.username + ); + + // [v2] table torrust_user_profiles + + println!( + "[v2][torrust_user_profiles] adding user profile for user with username {:?} and id {:?} ...", + &user.username, &user.user_id + ); + + dest_database + .insert_user_profile( + user.user_id, + &user.username, + &user.email, + user.email_verified, + ) + .await + .unwrap(); + + println!( + "[v2][torrust_user_profiles] user profile added for user with username {:?} and id {:?}.", + &user.username, &user.user_id + ); + + // [v2] table torrust_user_authentication + + println!( + "[v2][torrust_user_authentication] adding password hash ({:?}) for user id ({:?}) ...", + &user.password, &user.user_id + ); + + dest_database + .insert_user_password_hash(user.user_id, &user.password) + .await + .unwrap(); + + println!( + "[v2][torrust_user_authentication] password hash ({:?}) added for user id ({:?}).", + &user.password, &user.user_id + ); + } +} diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 91e42931..e2c32c52 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -3,7 +3,7 @@ //! NOTES for `torrust_users` table transfer: //! //! - In v2, the table `torrust_user` contains a field `date_registered` non existing in v1. -//! We changed that columns to allow NULL. WE also added the new column `date_imported` with +//! We changed that columns to allow NULL. We also added the new column `date_imported` with //! the datetime when the upgrader was executed. //! //! NOTES for `torrust_user_profiles` table transfer: @@ -11,18 +11,18 @@ //! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. //! Empty string is used as default value. 
-use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{ - SqliteDatabaseV2_0_0, TorrentRecordV2, -}; -use crate::utils::parse_torrent::decode_torrent; -use crate::{ - models::torrent_file::Torrent, - upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0, +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::{ + current_db, migrate_destiny_database, new_db, reset_destiny_database, }; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::category_transferrer::transfer_categories; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::transfer_torrents; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::tracker_key_transferrer::transfer_tracker_keys; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::user_transferrer::transfer_users; + use chrono::prelude::{DateTime, Utc}; -use std::{env, error, fs}; -use std::{sync::Arc, time::SystemTime}; +use std::env; +use std::time::SystemTime; use text_colorizer::*; @@ -86,8 +86,9 @@ pub async fn upgrade(args: &Arguments, date_imported: &str) { migrate_destiny_database(dest_database.clone()).await; reset_destiny_database(dest_database.clone()).await; + transfer_categories(source_database.clone(), dest_database.clone()).await; - transfer_user_data( + transfer_users( source_database.clone(), dest_database.clone(), date_imported, @@ -102,378 +103,9 @@ pub async fn upgrade(args: &Arguments, date_imported: &str) { .await; } -async fn current_db(db_filename: &str) -> Arc { - let source_database_connect_url = format!("sqlite://{}?mode=ro", db_filename); - Arc::new(SqliteDatabaseV1_0_0::new(&source_database_connect_url).await) -} - -async fn new_db(db_filename: &str) -> Arc { - let dest_database_connect_url = format!("sqlite://{}?mode=rwc", db_filename); - Arc::new(SqliteDatabaseV2_0_0::new(&dest_database_connect_url).await) -} - -async fn migrate_destiny_database(dest_database: Arc) { - println!("Running migrations in destiny database..."); - dest_database.migrate().await; -} - -async fn reset_destiny_database(dest_database: Arc) { - println!("Truncating all tables in destiny database ..."); - dest_database - .delete_all_database_rows() - .await - .expect("Can't reset destiny database."); -} - -async fn transfer_categories( - source_database: Arc, - dest_database: Arc, -) { - println!("Transferring categories ..."); - - let source_categories = source_database.get_categories_order_by_id().await.unwrap(); - println!("[v1] categories: {:?}", &source_categories); - - let result = dest_database.reset_categories_sequence().await.unwrap(); - println!("[v2] reset categories sequence result {:?}", result); - - for cat in &source_categories { - println!( - "[v2] adding category {:?} with id {:?} ...", - &cat.name, &cat.category_id - ); - let id = dest_database - .insert_category_and_get_id(&cat.name) - .await - .unwrap(); - - if id != cat.category_id { - panic!( - "Error copying category {:?} from source DB to destiny DB", - &cat.category_id - ); - } - - println!("[v2] category: {:?} {:?} added.", id, &cat.name); - } - - let dest_categories = dest_database.get_categories().await.unwrap(); - println!("[v2] categories: {:?}", &dest_categories); -} - -async fn transfer_user_data( - source_database: Arc, - dest_database: Arc, - date_imported: &str, -) { - println!("Transferring users ..."); - - // Transfer table `torrust_users` - - let users = source_database.get_users().await.unwrap(); - - for user in &users { - // [v2] table torrust_users - - println!( - "[v2][torrust_users] adding 
user with username {:?} and id {:?} ...", - &user.username, &user.user_id - ); - - let id = dest_database - .insert_imported_user(user.user_id, date_imported, user.administrator) - .await - .unwrap(); - - if id != user.user_id { - panic!( - "Error copying user {:?} from source DB to destiny DB", - &user.user_id - ); - } - - println!( - "[v2][torrust_users] user: {:?} {:?} added.", - &user.user_id, &user.username - ); - - // [v2] table torrust_user_profiles - - println!( - "[v2][torrust_user_profiles] adding user profile for user with username {:?} and id {:?} ...", - &user.username, &user.user_id - ); - - dest_database - .insert_user_profile( - user.user_id, - &user.username, - &user.email, - user.email_verified, - ) - .await - .unwrap(); - - println!( - "[v2][torrust_user_profiles] user profile added for user with username {:?} and id {:?}.", - &user.username, &user.user_id - ); - - // [v2] table torrust_user_authentication - - println!( - "[v2][torrust_user_authentication] adding password hash ({:?}) for user id ({:?}) ...", - &user.password, &user.user_id - ); - - dest_database - .insert_user_password_hash(user.user_id, &user.password) - .await - .unwrap(); - - println!( - "[v2][torrust_user_authentication] password hash ({:?}) added for user id ({:?}).", - &user.password, &user.user_id - ); - } -} - /// Current datetime in ISO8601 without time zone. /// For example: 2022-11-10 10:35:15 pub fn datetime_iso_8601() -> String { let dt: DateTime = SystemTime::now().into(); format!("{}", dt.format("%Y-%m-%d %H:%M:%S")) } - -async fn transfer_tracker_keys( - source_database: Arc, - dest_database: Arc, -) { - println!("Transferring tracker keys ..."); - - // Transfer table `torrust_tracker_keys` - - let tracker_keys = source_database.get_tracker_keys().await.unwrap(); - - for tracker_key in &tracker_keys { - // [v2] table torrust_tracker_keys - - println!( - "[v2][torrust_users] adding the tracker key with id {:?} ...", - &tracker_key.key_id - ); - - let id = dest_database - .insert_tracker_key( - tracker_key.key_id, - tracker_key.user_id, - &tracker_key.key, - tracker_key.valid_until, - ) - .await - .unwrap(); - - if id != tracker_key.key_id { - panic!( - "Error copying tracker key {:?} from source DB to destiny DB", - &tracker_key.key_id - ); - } - - println!( - "[v2][torrust_tracker_keys] tracker key with id {:?} added.", - &tracker_key.key_id - ); - } -} - -async fn transfer_torrents( - source_database: Arc, - dest_database: Arc, - upload_path: &str, -) { - println!("Transferring torrents ..."); - - // Transfer table `torrust_torrents_files` - - // Although the The table `torrust_torrents_files` existed in version v1.0.0 - // it was was not used. - - // Transfer table `torrust_torrents` - - let torrents = source_database.get_torrents().await.unwrap(); - - for torrent in &torrents { - // [v2] table torrust_torrents - - println!( - "[v2][torrust_torrents] adding the torrent: {:?} ...", - &torrent.torrent_id - ); - - let uploader = source_database - .get_user_by_username(&torrent.uploader) - .await - .unwrap(); - - if uploader.username != torrent.uploader { - panic!( - "Error copying torrent with id {:?}. 
- Username (`uploader`) in `torrust_torrents` table does not match `username` in `torrust_users` table", - &torrent.torrent_id - ); - } - - let filepath = format!("{}/{}.torrent", upload_path, &torrent.torrent_id); - - let torrent_from_file_result = read_torrent_from_file(&filepath); - - if torrent_from_file_result.is_err() { - panic!("Error torrent file not found: {:?}", &filepath); - } - - let torrent_from_file = torrent_from_file_result.unwrap(); - - let id = dest_database - .insert_torrent(&TorrentRecordV2::from_v1_data( - torrent, - &torrent_from_file.info, - &uploader, - )) - .await - .unwrap(); - - if id != torrent.torrent_id { - panic!( - "Error copying torrent {:?} from source DB to destiny DB", - &torrent.torrent_id - ); - } - - println!( - "[v2][torrust_torrents] torrent with id {:?} added.", - &torrent.torrent_id - ); - - // [v2] table torrust_torrent_files - - println!("[v2][torrust_torrent_files] adding torrent files"); - - if torrent_from_file.is_a_single_file_torrent() { - // The torrent contains only one file then: - // - "path" is NULL - // - "md5sum" can be NULL - - println!( - "[v2][torrust_torrent_files][single-file-torrent] adding torrent file {:?} with length {:?} ...", - &torrent_from_file.info.name, &torrent_from_file.info.length, - ); - - let file_id = dest_database - .insert_torrent_file_for_torrent_with_one_file( - torrent.torrent_id, - // TODO: it seems med5sum can be None. Why? When? - &torrent_from_file.info.md5sum.clone(), - torrent_from_file.info.length.unwrap(), - ) - .await; - - println!( - "[v2][torrust_torrent_files][single-file-torrent] torrent file insert result: {:?}", - &file_id - ); - } else { - // Multiple files are being shared - let files = torrent_from_file.info.files.as_ref().unwrap(); - - for file in files.iter() { - println!( - "[v2][torrust_torrent_files][multiple-file-torrent] adding torrent file: {:?} ...", - &file - ); - - let file_id = dest_database - .insert_torrent_file_for_torrent_with_multiple_files(torrent, file) - .await; - - println!( - "[v2][torrust_torrent_files][multiple-file-torrent] torrent file insert result: {:?}", - &file_id - ); - } - } - - // [v2] table torrust_torrent_info - - println!( - "[v2][torrust_torrent_info] adding the torrent info for torrent id {:?} ...", - &torrent.torrent_id - ); - - let id = dest_database.insert_torrent_info(torrent).await; - - println!( - "[v2][torrust_torrents] torrent info insert result: {:?}.", - &id - ); - - // [v2] table torrust_torrent_announce_urls - - println!( - "[v2][torrust_torrent_announce_urls] adding the torrent announce url for torrent id {:?} ...", - &torrent.torrent_id - ); - - if torrent_from_file.announce_list.is_some() { - // BEP-0012. Multiple trackers. 
-
-            println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id);
-
-            // flatten the nested vec (this will however remove the tier grouping)
-            let announce_urls = torrent_from_file
-                .announce_list
-                .clone()
-                .unwrap()
-                .into_iter()
-                .flatten()
-                .collect::<Vec<String>>();
-
-            for tracker_url in announce_urls.iter() {
-                println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id);
-
-                let announce_url_id = dest_database
-                    .insert_torrent_announce_url(torrent.torrent_id, tracker_url)
-                    .await;
-
-                println!("[v2][torrust_torrent_announce_urls][announce-list] torrent announce url insert result {:?} ...", &announce_url_id);
-            }
-        } else if torrent_from_file.announce.is_some() {
-            println!("[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id);
-
-            let announce_url_id = dest_database
-                .insert_torrent_announce_url(
-                    torrent.torrent_id,
-                    &torrent_from_file.announce.unwrap(),
-                )
-                .await;
-
-            println!(
-                "[v2][torrust_torrent_announce_urls][announce] torrent announce url insert result {:?} ...",
-                &announce_url_id
-            );
-        }
-    }
-    println!("Torrents transferred");
-}
-
-pub fn read_torrent_from_file(path: &str) -> Result<Torrent, Box<dyn error::Error>> {
-    let contents = match fs::read(path) {
-        Ok(contents) => contents,
-        Err(e) => return Err(e.into()),
-    };
-
-    match decode_torrent(&contents) {
-        Ok(torrent) => Ok(torrent),
-        Err(e) => Err(e),
-    }
-}
diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs
index d7ec1e39..79256e86 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs
@@ -6,7 +6,7 @@ use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1
     TorrentRecordV1, UserRecordV1,
 };
 use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::convert_timestamp_to_datetime;
-use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::read_torrent_from_file;
+use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::read_torrent_from_file;

From b9a8bf92008d87e99af9261475b784f29f0df6d1 Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Fri, 11 Nov 2022 18:32:07 +0000
Subject: [PATCH 46/53] fix: [#56] remove comment

We do not need to read the migration files from the dir because they are
not going to change for version v1.0.0.
---
 tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs
index cbc2a055..078f3a58 100644
--- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs
+++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs
@@ -27,7 +27,6 @@ impl SqliteDatabaseV1_0_0 {
     pub async fn migrate(&self, fixtures_dir: &str) {
         let migrations_dir = format!("{}database/v1.0.0/migrations/", fixtures_dir);
 
-        // TODO: read files from dir
         let migrations = vec![
             "20210831113004_torrust_users.sql",
             "20210904135524_torrust_tracker_keys.sql",

From 38fee53ed46c9bd200dc31ab7dd5479c916005df Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Mon, 14 Nov 2022 14:12:28 +0000
Subject: [PATCH 47/53] test: [#56] new test for password verification

The application now supports two password hashing methods:

- "pbkdf2-sha256": the old one. Only used for users imported from DB
  version v1.0.0.
- "argon2": the new one, used for registered users.
---
 src/routes/user.rs | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/src/routes/user.rs b/src/routes/user.rs
index 9195be7a..0feef088 100644
--- a/src/routes/user.rs
+++ b/src/routes/user.rs
@@ -279,3 +279,39 @@ pub async fn ban_user(req: HttpRequest, app_data: WebAppData) -> ServiceResult<impl Responder> {

From: Jose Celano
Date: Mon, 14 Nov 2022 16:18:17 +0000
Subject: [PATCH 48/53] fix: [#56] db migration for imported users

Imported users from DB version v1.0.0 (only SQLite) do not have a
"date_registered" field. We have to copy that behavior in MySQL even if
we do not have users imported from previous versions in MySQL.

Support for MySQL was added after version v1.0.0.
---
 .../20221109092556_torrust_user_date_registered_allow_null.sql | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql b/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql
index 9f936f8a..92949e96 100644
--- a/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql
+++ b/migrations/mysql/20221109092556_torrust_user_date_registered_allow_null.sql
@@ -1 +1 @@
-ALTER TABLE torrust_users CHANGE date_registered date_registered DATETIME NOT NULL
\ No newline at end of file
+ALTER TABLE torrust_users CHANGE date_registered date_registered DATETIME DEFAULT NULL
\ No newline at end of file

From 8b761c8c1d17814c4582df7804465ff125b8ec9b Mon Sep 17 00:00:00 2001
From: Jose Celano
Date: Mon, 28 Nov 2022 13:13:23 +0000
Subject: [PATCH 49/53] feat: [#56] keep category id in DB migration script

Instead of regenerating the ID sequence, we keep the category id, because
we were keeping the IDs for all tables except this one.

@ldpr helped test the migration script and found the issue with the
categories IDs.
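
The transferrer no longer lets SQLite generate a fresh id (both snippets
are taken from the diff below):

    let id = dest_database
        .insert_category_and_get_id(&cat.name)
        .await
        .unwrap();

It now inserts the id coming from the source database:

    let id = dest_database
        .insert_category(&CategoryRecordV2 {
            category_id: cat.category_id,
            name: cat.name.clone(),
        })
        .await
        .unwrap();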
Co-authored-by: ldpr <103618016+ldpr@users.noreply.github.com> --- .../databases/sqlite_v1_0_0.rs | 6 +- .../databases/sqlite_v2_0_0.rs | 9 +++ .../transferrers/category_transferrer.rs | 11 ++- .../from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs | 19 ++++- .../from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs | 13 ++++ .../testers/category_tester.rs | 70 +++++++++++++++++++ .../from_v1_0_0_to_v2_0_0/testers/mod.rs | 1 + .../testers/torrent_tester.rs | 25 +++---- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 5 ++ 9 files changed, 140 insertions(+), 19 deletions(-) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs index a5743000..bec424ae 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v1_0_0.rs @@ -5,7 +5,7 @@ use sqlx::{query_as, SqlitePool}; use crate::databases::database::DatabaseError; #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] -pub struct CategoryRecord { +pub struct CategoryRecordV1 { pub category_id: i64, pub name: String, } @@ -64,8 +64,8 @@ impl SqliteDatabaseV1_0_0 { Self { pool: db } } - pub async fn get_categories_order_by_id(&self) -> Result, DatabaseError> { - query_as::<_, CategoryRecord>( + pub async fn get_categories_order_by_id(&self) -> Result, DatabaseError> { + query_as::<_, CategoryRecordV1>( "SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC", ) .fetch_all(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index bee97bc2..828a63b9 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -117,6 +117,15 @@ impl SqliteDatabaseV2_0_0 { }) } + pub async fn insert_category(&self, category: &CategoryRecordV2) -> Result { + query("INSERT INTO torrust_categories (category_id, name) VALUES (?, ?)") + .bind(category.category_id) + .bind(category.name.clone()) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + pub async fn insert_imported_user( &self, user_id: i64, diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs index b8e20515..c48c27bf 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs @@ -1,5 +1,7 @@ use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{ + CategoryRecordV2, SqliteDatabaseV2_0_0, +}; use std::sync::Arc; pub async fn transfer_categories( @@ -12,7 +14,7 @@ pub async fn transfer_categories( println!("[v1] categories: {:?}", &source_categories); let result = dest_database.reset_categories_sequence().await.unwrap(); - println!("[v2] reset categories sequence result {:?}", result); + println!("[v2] reset categories sequence result: {:?}", result); for cat in &source_categories { println!( @@ -20,7 +22,10 @@ pub async fn transfer_categories( &cat.name, &cat.category_id ); let id = dest_database - .insert_category_and_get_id(&cat.name) + .insert_category(&CategoryRecordV2 { + 
category_id: cat.category_id, + name: cat.name.clone(), + }) .await .unwrap(); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs index 078f3a58..73f7d556 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs @@ -2,7 +2,7 @@ use sqlx::sqlite::SqlitePoolOptions; use sqlx::{query, SqlitePool}; use std::fs; use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{ - TorrentRecordV1, TrackerKeyRecordV1, UserRecordV1, + CategoryRecordV1, TorrentRecordV1, TrackerKeyRecordV1, UserRecordV1, }; pub struct SqliteDatabaseV1_0_0 { @@ -54,6 +54,23 @@ impl SqliteDatabaseV1_0_0 { println!("Migration result {:?}", res); } + pub async fn insert_category(&self, category: &CategoryRecordV1) -> Result { + query("INSERT INTO torrust_categories (category_id, name) VALUES (?, ?)") + .bind(category.category_id) + .bind(category.name.clone()) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) + } + + pub async fn delete_all_categories(&self) -> Result<(), sqlx::Error> { + query("DELETE FROM torrust_categories") + .execute(&self.pool) + .await + .unwrap(); + Ok(()) + } + pub async fn insert_user(&self, user: &UserRecordV1) -> Result { query("INSERT INTO torrust_users (user_id, username, email, email_verified, password, administrator) VALUES (?, ?, ?, ?, ?, ?)") .bind(user.user_id) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs index 20a55daa..eea5f354 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs @@ -3,6 +3,12 @@ use sqlx::sqlite::SqlitePoolOptions; use sqlx::{query_as, SqlitePool}; use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::TorrentRecordV2; +#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] +pub struct CategoryRecordV2 { + pub category_id: i64, + pub name: String, +} + #[derive(Debug, Serialize, Deserialize, sqlx::FromRow)] pub struct UserRecordV2 { pub user_id: i64, @@ -76,6 +82,13 @@ impl SqliteDatabaseV2_0_0 { Self { pool: db } } + pub async fn get_category(&self, category_id: i64) -> Result { + query_as::<_, CategoryRecordV2>("SELECT * FROM torrust_categories WHERE category_id = ?") + .bind(category_id) + .fetch_one(&self.pool) + .await + } + pub async fn get_user(&self, user_id: i64) -> Result { query_as::<_, UserRecordV2>("SELECT * FROM torrust_users WHERE user_id = ?") .bind(user_id) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs new file mode 100644 index 00000000..e8e79d54 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs @@ -0,0 +1,70 @@ +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use std::sync::Arc; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::CategoryRecordV1; + +pub struct CategoryTester { + source_database: Arc, + destiny_database: Arc, + test_data: TestData, +} + +pub struct TestData { + pub categories: Vec, +} + +impl CategoryTester { + pub fn new( + source_database: Arc, + destiny_database: Arc, + ) -> Self { + let category_01 = CategoryRecordV1 { + category_id: 10, + name: "category name 10".to_string(), + }; + let 
category_02 = CategoryRecordV1 { + category_id: 11, + name: "category name 11".to_string(), + }; + + Self { + source_database, + destiny_database, + test_data: TestData { + categories: vec![category_01, category_02], + }, + } + } + + pub fn get_valid_category_id(&self) -> i64 { + self.test_data.categories[0].category_id + } + + /// Table `torrust_categories` + pub async fn load_data_into_source_db(&self) { + // Delete categories added by migrations + self.source_database.delete_all_categories().await.unwrap(); + + // Add test categories + for categories in &self.test_data.categories { + self.source_database + .insert_category(&categories) + .await + .unwrap(); + } + } + + /// Table `torrust_categories` + pub async fn assert_data_in_destiny_db(&self) { + for categories in &self.test_data.categories { + let imported_category = self + .destiny_database + .get_category(categories.category_id) + .await + .unwrap(); + + assert_eq!(imported_category.category_id, categories.category_id); + assert_eq!(imported_category.name, categories.name); + } + } +} diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs index 730b5149..36629cc3 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs @@ -1,3 +1,4 @@ +pub mod category_tester; pub mod torrent_tester; pub mod tracker_key_tester; pub mod user_tester; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs index 79256e86..9b4c8c2a 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs @@ -24,19 +24,20 @@ impl TorrentTester { source_database: Arc, destiny_database: Arc, user: &UserRecordV1, + category_id: i64, ) -> Self { let torrent_01 = TorrentRecordV1 { torrent_id: 1, uploader: user.username.clone(), info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), title: "A Mandelbrot Set 2048x2048px picture".to_string(), - category_id: 1, + category_id, description: Some( - "A beautiful Mandelbrot Set picture in black and white. \ - - Hybrid torrent V1 and V2. \ - - Single-file torrent. \ - - Public. \ - - More than one tracker URL. \ + "A beautiful Mandelbrot Set picture in black and white. \n \ + - Hybrid torrent V1 and V2. \n \ + - Single-file torrent. \n \ + - Public. \n \ + - More than one tracker URL. \n \ " .to_string(), ), @@ -50,13 +51,13 @@ impl TorrentTester { uploader: user.username.clone(), info_hash: "0902d375f18ec020f0cc68ed4810023032ba81cb".to_string(), title: "Two Mandelbrot Set 2048x2048px pictures".to_string(), - category_id: 1, + category_id, description: Some( - "Two beautiful Mandelbrot Set pictures in black and white. \ - - Hybrid torrent V1 and V2. \ - - Multiple-files torrent. \ - - Private. - - Only one tracker URL. + "Two beautiful Mandelbrot Set pictures in black and white. \n \ + - Hybrid torrent V1 and V2. \n \ + - Multiple-files torrent. \n \ + - Private. \n \ + - Only one tracker URL. \n \ " .to_string(), ), diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index a40f0a37..ee7ddc8f 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -13,6 +13,7 @@ //! to see the "upgrader" command output. 
use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::category_tester::CategoryTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::torrent_tester::TorrentTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::tracker_key_tester::TrackerKeyTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_tester::UserTester; @@ -56,6 +57,7 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { // The datetime when the upgrader is executed let execution_time = datetime_iso_8601(); + let category_tester = CategoryTester::new(source_db.clone(), dest_db.clone()); let user_tester = UserTester::new(source_db.clone(), dest_db.clone(), &execution_time); let tracker_key_tester = TrackerKeyTester::new( source_db.clone(), @@ -66,9 +68,11 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { source_db.clone(), dest_db.clone(), &user_tester.test_data.user, + category_tester.get_valid_category_id(), ); // Load data into source database in version v1.0.0 + category_tester.load_data_into_source_db().await; user_tester.load_data_into_source_db().await; tracker_key_tester.load_data_into_source_db().await; torrent_tester.load_data_into_source_db().await; @@ -85,6 +89,7 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { .await; // Assertions for data transferred to the new database in version v2.0.0 + category_tester.assert_data_in_destiny_db().await; user_tester.assert_data_in_destiny_db().await; tracker_key_tester.assert_data_in_destiny_db().await; torrent_tester From b400962657e893810592733d120c6d5bd9701c4a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 29 Nov 2022 17:17:32 +0000 Subject: [PATCH 50/53] fix: format --- build.rs | 2 +- src/databases/sqlite.rs | 81 ++++---------- src/models/tracker_key.rs | 2 +- src/models/user.rs | 2 +- src/routes/user.rs | 16 ++- .../from_v1_0_0_to_v2_0_0/databases/mod.rs | 3 +- .../databases/sqlite_v1_0_0.rs | 18 ++- .../databases/sqlite_v2_0_0.rs | 103 ++++++------------ .../transferrers/category_transferrer.rs | 22 +--- .../transferrers/torrent_transferrer.rs | 64 +++++------ .../transferrers/tracker_key_transferrer.rs | 8 +- .../transferrers/user_transferrer.rs | 20 +--- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 35 ++---- src/upgrades/mod.rs | 2 +- .../from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs | 16 +-- .../from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs | 55 +++------- .../testers/category_tester.rs | 22 ++-- .../testers/torrent_tester.rs | 57 +++------- .../testers/tracker_key_tester.rs | 22 ++-- .../testers/user_tester.rs | 44 ++------ .../from_v1_0_0_to_v2_0_0/upgrader.rs | 24 ++-- tests/upgrades/mod.rs | 2 +- 22 files changed, 195 insertions(+), 425 deletions(-) diff --git a/build.rs b/build.rs index 76095938..d5068697 100644 --- a/build.rs +++ b/build.rs @@ -2,4 +2,4 @@ fn main() { // trigger recompilation when a new migration is added println!("cargo:rerun-if-changed=migrations"); -} \ No newline at end of file +} diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 62b197d1..835979fe 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -54,13 +54,12 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::Error)?; // add password hash for account - let insert_user_auth_result = - query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") - .bind(user_id) - .bind(password_hash) - .execute(&mut tx) - .await - 
.map_err(|_| DatabaseError::Error); + let insert_user_auth_result = query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") + .bind(user_id) + .bind(password_hash) + .execute(&mut tx) + .await + .map_err(|_| DatabaseError::Error); // rollback transaction on error if let Err(e) = insert_user_auth_result { @@ -109,23 +108,15 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::UserNotFound) } - async fn get_user_authentication_from_id( - &self, - user_id: i64, - ) -> Result { - query_as::<_, UserAuthentication>( - "SELECT * FROM torrust_user_authentication WHERE user_id = ?", - ) - .bind(user_id) - .fetch_one(&self.pool) - .await - .map_err(|_| DatabaseError::UserNotFound) + async fn get_user_authentication_from_id(&self, user_id: i64) -> Result { + query_as::<_, UserAuthentication>("SELECT * FROM torrust_user_authentication WHERE user_id = ?") + .bind(user_id) + .fetch_one(&self.pool) + .await + .map_err(|_| DatabaseError::UserNotFound) } - async fn get_user_profile_from_username( - &self, - username: &str, - ) -> Result { + async fn get_user_profile_from_username(&self, username: &str) -> Result { query_as::<_, UserProfile>("SELECT * FROM torrust_user_profiles WHERE username = ?") .bind(username) .fetch_one(&self.pool) @@ -164,12 +155,7 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::Error) } - async fn ban_user( - &self, - user_id: i64, - reason: &str, - date_expiry: NaiveDateTime, - ) -> Result<(), DatabaseError> { + async fn ban_user(&self, user_id: i64, reason: &str, date_expiry: NaiveDateTime) -> Result<(), DatabaseError> { // date needs to be in ISO 8601 format let date_expiry_string = date_expiry.format("%Y-%m-%d %H:%M:%S").to_string(); @@ -207,11 +193,7 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::Error) } - async fn add_tracker_key( - &self, - user_id: i64, - tracker_key: &TrackerKey, - ) -> Result<(), DatabaseError> { + async fn add_tracker_key(&self, user_id: i64, tracker_key: &TrackerKey) -> Result<(), DatabaseError> { let key = tracker_key.key.clone(); query("INSERT INTO torrust_tracker_keys (user_id, tracker_key, date_expiry) VALUES ($1, $2, $3)") @@ -361,10 +343,7 @@ impl Database for SqliteDatabase { category_filter_query ); - let count_query = format!( - "SELECT COUNT(*) as count FROM ({}) AS count_table", - query_string - ); + let count_query = format!("SELECT COUNT(*) as count FROM ({}) AS count_table", query_string); let count_result: Result = query_as(&count_query) .bind(title.clone()) @@ -411,11 +390,7 @@ impl Database for SqliteDatabase { let (pieces, root_hash): (String, bool) = if let Some(pieces) = &torrent.info.pieces { (bytes_to_hex(pieces.as_ref()), false) } else { - let root_hash = torrent - .info - .root_hash - .as_ref() - .ok_or(DatabaseError::Error)?; + let root_hash = torrent.info.root_hash.as_ref().ok_or(DatabaseError::Error)?; (root_hash.to_string(), true) }; @@ -562,10 +537,7 @@ impl Database for SqliteDatabase { )) } - async fn get_torrent_info_from_id( - &self, - torrent_id: i64, - ) -> Result { + async fn get_torrent_info_from_id(&self, torrent_id: i64) -> Result { query_as::<_, DbTorrentInfo>( "SELECT name, pieces, piece_length, private, root_hash FROM torrust_torrents WHERE torrent_id = ?", ) @@ -604,10 +576,7 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::TorrentNotFound) } - async fn get_torrent_listing_from_id( - &self, - torrent_id: i64, - ) -> Result { + async fn get_torrent_listing_from_id(&self, torrent_id: i64) -> Result { query_as::<_, 
TorrentListing>( "SELECT tt.torrent_id, tp.username AS uploader, tt.info_hash, ti.title, ti.description, tt.category_id, tt.date_uploaded, tt.size AS file_size, CAST(COALESCE(sum(ts.seeders),0) as signed) as seeders, @@ -632,11 +601,7 @@ impl Database for SqliteDatabase { .map_err(|_| DatabaseError::Error) } - async fn update_torrent_title( - &self, - torrent_id: i64, - title: &str, - ) -> Result<(), DatabaseError> { + async fn update_torrent_title(&self, torrent_id: i64, title: &str) -> Result<(), DatabaseError> { query("UPDATE torrust_torrent_info SET title = $1 WHERE torrent_id = $2") .bind(title) .bind(torrent_id) @@ -661,11 +626,7 @@ impl Database for SqliteDatabase { }) } - async fn update_torrent_description( - &self, - torrent_id: i64, - description: &str, - ) -> Result<(), DatabaseError> { + async fn update_torrent_description(&self, torrent_id: i64, description: &str) -> Result<(), DatabaseError> { query("UPDATE torrust_torrent_info SET description = $1 WHERE torrent_id = $2") .bind(description) .bind(torrent_id) diff --git a/src/models/tracker_key.rs b/src/models/tracker_key.rs index 15e23622..b1baea72 100644 --- a/src/models/tracker_key.rs +++ b/src/models/tracker_key.rs @@ -17,4 +17,4 @@ pub struct NewTrackerKey { pub struct Duration { pub secs: i64, pub nanos: i64, -} \ No newline at end of file +} diff --git a/src/models/user.rs b/src/models/user.rs index f1418f3a..9a500d4d 100644 --- a/src/models/user.rs +++ b/src/models/user.rs @@ -11,7 +11,7 @@ pub struct User { #[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] pub struct UserAuthentication { pub user_id: i64, - pub password_hash: String + pub password_hash: String, } #[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone, sqlx::FromRow)] diff --git a/src/routes/user.rs b/src/routes/user.rs index 0feef088..df9a385a 100644 --- a/src/routes/user.rs +++ b/src/routes/user.rs @@ -282,17 +282,17 @@ pub async fn ban_user(req: HttpRequest, app_data: WebAppData) -> ServiceResult Result, DatabaseError> { - query_as::<_, CategoryRecordV1>( - "SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC", - ) - .fetch_all(&self.pool) - .await - .map_err(|_| DatabaseError::Error) + query_as::<_, CategoryRecordV1>("SELECT category_id, name FROM torrust_categories ORDER BY category_id ASC") + .fetch_all(&self.pool) + .await + .map_err(|_| DatabaseError::Error) } pub async fn get_users(&self) -> Result, sqlx::Error> { @@ -99,10 +97,8 @@ impl SqliteDatabaseV1_0_0 { } pub async fn get_torrent_files(&self) -> Result, sqlx::Error> { - query_as::<_, TorrentFileRecordV1>( - "SELECT * FROM torrust_torrent_files ORDER BY file_id ASC", - ) - .fetch_all(&self.pool) - .await + query_as::<_, TorrentFileRecordV1>("SELECT * FROM torrust_torrent_files ORDER BY file_id ASC") + .fetch_all(&self.pool) + .await } } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs index 828a63b9..35207ad4 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/sqlite_v2_0_0.rs @@ -3,11 +3,10 @@ use serde::{Deserialize, Serialize}; use sqlx::sqlite::{SqlitePoolOptions, SqliteQueryResult}; use sqlx::{query, query_as, SqlitePool}; +use super::sqlite_v1_0_0::{TorrentRecordV1, UserRecordV1}; use crate::databases::database::DatabaseError; use crate::models::torrent_file::{TorrentFile, TorrentInfo}; -use super::sqlite_v1_0_0::{TorrentRecordV1, UserRecordV1}; - #[derive(Debug, Serialize, 
Deserialize, sqlx::FromRow)] pub struct CategoryRecordV2 { pub category_id: i64, @@ -30,11 +29,7 @@ pub struct TorrentRecordV2 { } impl TorrentRecordV2 { - pub fn from_v1_data( - torrent: &TorrentRecordV1, - torrent_info: &TorrentInfo, - uploader: &UserRecordV1, - ) -> Self { + pub fn from_v1_data(torrent: &TorrentRecordV1, torrent_info: &TorrentInfo, uploader: &UserRecordV1) -> Self { Self { torrent_id: torrent.torrent_id, uploader_id: uploader.user_id, @@ -96,10 +91,7 @@ impl SqliteDatabaseV2_0_0 { .map_err(|_| DatabaseError::Error) } - pub async fn insert_category_and_get_id( - &self, - category_name: &str, - ) -> Result { + pub async fn insert_category_and_get_id(&self, category_name: &str) -> Result { query("INSERT INTO torrust_categories (name) VALUES (?)") .bind(category_name) .execute(&self.pool) @@ -126,12 +118,7 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } - pub async fn insert_imported_user( - &self, - user_id: i64, - date_imported: &str, - administrator: bool, - ) -> Result { + pub async fn insert_imported_user(&self, user_id: i64, date_imported: &str, administrator: bool) -> Result { query("INSERT INTO torrust_users (user_id, date_imported, administrator) VALUES (?, ?, ?)") .bind(user_id) .bind(date_imported) @@ -148,21 +135,19 @@ impl SqliteDatabaseV2_0_0 { email: &str, email_verified: bool, ) -> Result { - query("INSERT INTO torrust_user_profiles (user_id, username, email, email_verified, bio, avatar) VALUES (?, ?, ?, ?, ?, ?)") - .bind(user_id) - .bind(username) - .bind(email) - .bind(email_verified) - .execute(&self.pool) - .await - .map(|v| v.last_insert_rowid()) + query( + "INSERT INTO torrust_user_profiles (user_id, username, email, email_verified, bio, avatar) VALUES (?, ?, ?, ?, ?, ?)", + ) + .bind(user_id) + .bind(username) + .bind(email) + .bind(email_verified) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) } - pub async fn insert_user_password_hash( - &self, - user_id: i64, - password_hash: &str, - ) -> Result { + pub async fn insert_user_password_hash(&self, user_id: i64, password_hash: &str) -> Result { query("INSERT INTO torrust_user_authentication (user_id, password_hash) VALUES (?, ?)") .bind(user_id) .bind(password_hash) @@ -241,15 +226,14 @@ impl SqliteDatabaseV2_0_0 { torrent: &TorrentRecordV1, file: &TorrentFile, ) -> Result { - query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH, PATH) VALUES (?, ?, ?, ?)", - ) - .bind(file.md5sum.clone()) - .bind(torrent.torrent_id) - .bind(file.length) - .bind(file.path.join("/")) - .execute(&self.pool) - .await - .map(|v| v.last_insert_rowid()) + query("INSERT INTO torrust_torrent_files (md5sum, torrent_id, LENGTH, PATH) VALUES (?, ?, ?, ?)") + .bind(file.md5sum.clone()) + .bind(torrent.torrent_id) + .bind(file.length) + .bind(file.path.join("/")) + .execute(&self.pool) + .await + .map(|v| v.last_insert_rowid()) } pub async fn insert_torrent_info(&self, torrent: &TorrentRecordV1) -> Result { @@ -262,11 +246,7 @@ impl SqliteDatabaseV2_0_0 { .map(|v| v.last_insert_rowid()) } - pub async fn insert_torrent_announce_url( - &self, - torrent_id: i64, - tracker_url: &str, - ) -> Result { + pub async fn insert_torrent_announce_url(&self, torrent_id: i64, tracker_url: &str) -> Result { query("INSERT INTO torrust_torrent_announce_urls (torrent_id, tracker_url) VALUES (?, ?)") .bind(torrent_id) .bind(tracker_url) @@ -276,50 +256,29 @@ impl SqliteDatabaseV2_0_0 { } pub async fn delete_all_database_rows(&self) -> Result<(), DatabaseError> { - query("DELETE FROM 
torrust_categories") - .execute(&self.pool) - .await - .unwrap(); + query("DELETE FROM torrust_categories").execute(&self.pool).await.unwrap(); - query("DELETE FROM torrust_torrents") - .execute(&self.pool) - .await - .unwrap(); + query("DELETE FROM torrust_torrents").execute(&self.pool).await.unwrap(); - query("DELETE FROM torrust_tracker_keys") - .execute(&self.pool) - .await - .unwrap(); + query("DELETE FROM torrust_tracker_keys").execute(&self.pool).await.unwrap(); - query("DELETE FROM torrust_users") - .execute(&self.pool) - .await - .unwrap(); + query("DELETE FROM torrust_users").execute(&self.pool).await.unwrap(); query("DELETE FROM torrust_user_authentication") .execute(&self.pool) .await .unwrap(); - query("DELETE FROM torrust_user_bans") - .execute(&self.pool) - .await - .unwrap(); + query("DELETE FROM torrust_user_bans").execute(&self.pool).await.unwrap(); query("DELETE FROM torrust_user_invitations") .execute(&self.pool) .await .unwrap(); - query("DELETE FROM torrust_user_profiles") - .execute(&self.pool) - .await - .unwrap(); + query("DELETE FROM torrust_user_profiles").execute(&self.pool).await.unwrap(); - query("DELETE FROM torrust_torrents") - .execute(&self.pool) - .await - .unwrap(); + query("DELETE FROM torrust_torrents").execute(&self.pool).await.unwrap(); query("DELETE FROM torrust_user_public_keys") .execute(&self.pool) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs index c48c27bf..e95cfeda 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs @@ -1,13 +1,9 @@ -use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{ - CategoryRecordV2, SqliteDatabaseV2_0_0, -}; use std::sync::Arc; -pub async fn transfer_categories( - source_database: Arc, - dest_database: Arc, -) { +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{CategoryRecordV2, SqliteDatabaseV2_0_0}; + +pub async fn transfer_categories(source_database: Arc, dest_database: Arc) { println!("Transferring categories ..."); let source_categories = source_database.get_categories_order_by_id().await.unwrap(); @@ -17,10 +13,7 @@ pub async fn transfer_categories( println!("[v2] reset categories sequence result: {:?}", result); for cat in &source_categories { - println!( - "[v2] adding category {:?} with id {:?} ...", - &cat.name, &cat.category_id - ); + println!("[v2] adding category {:?} with id {:?} ...", &cat.name, &cat.category_id); let id = dest_database .insert_category(&CategoryRecordV2 { category_id: cat.category_id, @@ -30,10 +23,7 @@ pub async fn transfer_categories( .unwrap(); if id != cat.category_id { - panic!( - "Error copying category {:?} from source DB to destiny DB", - &cat.category_id - ); + panic!("Error copying category {:?} from source DB to destiny DB", &cat.category_id); } println!("[v2] category: {:?} {:?} added.", id, &cat.name); diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs index bcb096b0..dcaa867a 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs +++ 
b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs @@ -1,10 +1,10 @@ +use std::sync::Arc; +use std::{error, fs}; + use crate::models::torrent_file::Torrent; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::TorrentRecordV2; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{SqliteDatabaseV2_0_0, TorrentRecordV2}; use crate::utils::parse_torrent::decode_torrent; -use std::sync::Arc; -use std::{error, fs}; pub async fn transfer_torrents( source_database: Arc, @@ -25,15 +25,9 @@ pub async fn transfer_torrents( for torrent in &torrents { // [v2] table torrust_torrents - println!( - "[v2][torrust_torrents] adding the torrent: {:?} ...", - &torrent.torrent_id - ); + println!("[v2][torrust_torrents] adding the torrent: {:?} ...", &torrent.torrent_id); - let uploader = source_database - .get_user_by_username(&torrent.uploader) - .await - .unwrap(); + let uploader = source_database.get_user_by_username(&torrent.uploader).await.unwrap(); if uploader.username != torrent.uploader { panic!( @@ -54,25 +48,15 @@ pub async fn transfer_torrents( let torrent_from_file = torrent_from_file_result.unwrap(); let id = dest_database - .insert_torrent(&TorrentRecordV2::from_v1_data( - torrent, - &torrent_from_file.info, - &uploader, - )) + .insert_torrent(&TorrentRecordV2::from_v1_data(torrent, &torrent_from_file.info, &uploader)) .await .unwrap(); if id != torrent.torrent_id { - panic!( - "Error copying torrent {:?} from source DB to destiny DB", - &torrent.torrent_id - ); + panic!("Error copying torrent {:?} from source DB to destiny DB", &torrent.torrent_id); } - println!( - "[v2][torrust_torrents] torrent with id {:?} added.", - &torrent.torrent_id - ); + println!("[v2][torrust_torrents] torrent with id {:?} added.", &torrent.torrent_id); // [v2] table torrust_torrent_files @@ -131,10 +115,7 @@ pub async fn transfer_torrents( let id = dest_database.insert_torrent_info(torrent).await; - println!( - "[v2][torrust_torrents] torrent info insert result: {:?}.", - &id - ); + println!("[v2][torrust_torrents] torrent info insert result: {:?}.", &id); // [v2] table torrust_torrent_announce_urls @@ -146,7 +127,10 @@ pub async fn transfer_torrents( if torrent_from_file.announce_list.is_some() { // BEP-0012. Multiple trackers. 
- println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); + println!( + "[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", + &torrent.torrent_id + ); // flatten the nested vec (this will however remove the) let announce_urls = torrent_from_file @@ -158,22 +142,28 @@ pub async fn transfer_torrents( .collect::>(); for tracker_url in announce_urls.iter() { - println!("[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); + println!( + "[v2][torrust_torrent_announce_urls][announce-list] adding the torrent announce url for torrent id {:?} ...", + &torrent.torrent_id + ); let announce_url_id = dest_database .insert_torrent_announce_url(torrent.torrent_id, tracker_url) .await; - println!("[v2][torrust_torrent_announce_urls][announce-list] torrent announce url insert result {:?} ...", &announce_url_id); + println!( + "[v2][torrust_torrent_announce_urls][announce-list] torrent announce url insert result {:?} ...", + &announce_url_id + ); } } else if torrent_from_file.announce.is_some() { - println!("[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...", &torrent.torrent_id); + println!( + "[v2][torrust_torrent_announce_urls][announce] adding the torrent announce url for torrent id {:?} ...", + &torrent.torrent_id + ); let announce_url_id = dest_database - .insert_torrent_announce_url( - torrent.torrent_id, - &torrent_from_file.announce.unwrap(), - ) + .insert_torrent_announce_url(torrent.torrent_id, &torrent_from_file.announce.unwrap()) .await; println!( diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs index e639739a..a2f3e753 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs @@ -1,11 +1,9 @@ +use std::sync::Arc; + use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; -use std::sync::Arc; -pub async fn transfer_tracker_keys( - source_database: Arc, - dest_database: Arc, -) { +pub async fn transfer_tracker_keys(source_database: Arc, dest_database: Arc) { println!("Transferring tracker keys ..."); // Transfer table `torrust_tracker_keys` diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs index 18d8d680..51d81727 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs @@ -1,6 +1,7 @@ +use std::sync::Arc; + use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; -use std::sync::Arc; pub async fn transfer_users( source_database: Arc, @@ -27,16 +28,10 @@ pub async fn transfer_users( .unwrap(); if id != user.user_id { - panic!( - "Error copying user {:?} from source DB to destiny DB", - &user.user_id - ); + panic!("Error copying user {:?} from source DB to destiny DB", &user.user_id); } - println!( - "[v2][torrust_users] user: {:?} {:?} added.", - &user.user_id, 
&user.username - ); + println!("[v2][torrust_users] user: {:?} {:?} added.", &user.user_id, &user.username); // [v2] table torrust_user_profiles @@ -46,12 +41,7 @@ pub async fn transfer_users( ); dest_database - .insert_user_profile( - user.user_id, - &user.username, - &user.email, - user.email_verified, - ) + .insert_user_profile(user.user_id, &user.username, &user.email, user.email_verified) .await .unwrap(); diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index e2c32c52..53b17cb4 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -11,28 +11,25 @@ //! - In v2, the table `torrust_user_profiles` contains two new fields: `bio` and `avatar`. //! Empty string is used as default value. -use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::{ - current_db, migrate_destiny_database, new_db, reset_destiny_database, -}; -use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::category_transferrer::transfer_categories; -use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::transfer_torrents; -use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::tracker_key_transferrer::transfer_tracker_keys; -use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::user_transferrer::transfer_users; - -use chrono::prelude::{DateTime, Utc}; - use std::env; use std::time::SystemTime; +use chrono::prelude::{DateTime, Utc}; use text_colorizer::*; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::{current_db, migrate_destiny_database, new_db, reset_destiny_database}; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::category_transferrer::transfer_categories; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::transfer_torrents; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::tracker_key_transferrer::transfer_tracker_keys; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::user_transferrer::transfer_users; + const NUMBER_OF_ARGUMENTS: i64 = 3; #[derive(Debug)] pub struct Arguments { - pub source_database_file: String, // The source database in version v1.0.0 we want to migrate + pub source_database_file: String, // The source database in version v1.0.0 we want to migrate pub destiny_database_file: String, // The new migrated database in version v2.0.0 - pub upload_path: String, // The relative dir where torrent files are stored + pub upload_path: String, // The relative dir where torrent files are stored } fn print_usage() { @@ -88,19 +85,9 @@ pub async fn upgrade(args: &Arguments, date_imported: &str) { reset_destiny_database(dest_database.clone()).await; transfer_categories(source_database.clone(), dest_database.clone()).await; - transfer_users( - source_database.clone(), - dest_database.clone(), - date_imported, - ) - .await; + transfer_users(source_database.clone(), dest_database.clone(), date_imported).await; transfer_tracker_keys(source_database.clone(), dest_database.clone()).await; - transfer_torrents( - source_database.clone(), - dest_database.clone(), - &args.upload_path, - ) - .await; + transfer_torrents(source_database.clone(), dest_database.clone(), &args.upload_path).await; } /// Current datetime in ISO8601 without time zone. 
diff --git a/src/upgrades/mod.rs b/src/upgrades/mod.rs index 736d54f6..e22b19a7 100644 --- a/src/upgrades/mod.rs +++ b/src/upgrades/mod.rs @@ -1 +1 @@ -pub mod from_v1_0_0_to_v2_0_0; \ No newline at end of file +pub mod from_v1_0_0_to_v2_0_0; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs index 73f7d556..fa1adc92 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v1_0_0.rs @@ -1,6 +1,7 @@ +use std::fs; + use sqlx::sqlite::SqlitePoolOptions; use sqlx::{query, SqlitePool}; -use std::fs; use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{ CategoryRecordV1, TorrentRecordV1, TrackerKeyRecordV1, UserRecordV1, }; @@ -46,8 +47,7 @@ impl SqliteDatabaseV1_0_0 { async fn run_migration_from_file(&self, migration_file_path: &str) { println!("Executing migration: {:?}", migration_file_path); - let sql = fs::read_to_string(migration_file_path) - .expect("Should have been able to read the file"); + let sql = fs::read_to_string(migration_file_path).expect("Should have been able to read the file"); let res = sqlx::query(&sql).execute(&self.pool).await; @@ -64,10 +64,7 @@ impl SqliteDatabaseV1_0_0 { } pub async fn delete_all_categories(&self) -> Result<(), sqlx::Error> { - query("DELETE FROM torrust_categories") - .execute(&self.pool) - .await - .unwrap(); + query("DELETE FROM torrust_categories").execute(&self.pool).await.unwrap(); Ok(()) } @@ -84,10 +81,7 @@ impl SqliteDatabaseV1_0_0 { .map(|v| v.last_insert_rowid()) } - pub async fn insert_tracker_key( - &self, - tracker_key: &TrackerKeyRecordV1, - ) -> Result { + pub async fn insert_tracker_key(&self, tracker_key: &TrackerKeyRecordV1) -> Result { query("INSERT INTO torrust_tracker_keys (key_id, user_id, key, valid_until) VALUES (?, ?, ?, ?)") .bind(tracker_key.key_id) .bind(tracker_key.user_id) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs index eea5f354..8d863c10 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/sqlite_v2_0_0.rs @@ -103,22 +103,14 @@ impl SqliteDatabaseV2_0_0 { .await } - pub async fn get_user_authentication( - &self, - user_id: i64, - ) -> Result { - query_as::<_, UserAuthenticationRecordV2>( - "SELECT * FROM torrust_user_authentication WHERE user_id = ?", - ) - .bind(user_id) - .fetch_one(&self.pool) - .await + pub async fn get_user_authentication(&self, user_id: i64) -> Result { + query_as::<_, UserAuthenticationRecordV2>("SELECT * FROM torrust_user_authentication WHERE user_id = ?") + .bind(user_id) + .fetch_one(&self.pool) + .await } - pub async fn get_tracker_key( - &self, - tracker_key_id: i64, - ) -> Result { + pub async fn get_tracker_key(&self, tracker_key_id: i64) -> Result { query_as::<_, TrackerKeyRecordV2>("SELECT * FROM torrust_tracker_keys WHERE user_id = ?") .bind(tracker_key_id) .fetch_one(&self.pool) @@ -132,34 +124,21 @@ impl SqliteDatabaseV2_0_0 { .await } - pub async fn get_torrent_info( - &self, - torrent_id: i64, - ) -> Result { - query_as::<_, TorrentInfoRecordV2>( - "SELECT * FROM torrust_torrent_info WHERE torrent_id = ?", - ) - .bind(torrent_id) - .fetch_one(&self.pool) - .await + pub async fn get_torrent_info(&self, torrent_id: i64) -> Result { + query_as::<_, TorrentInfoRecordV2>("SELECT * FROM torrust_torrent_info WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_one(&self.pool) + .await } - pub 
async fn get_torrent_announce_urls( - &self, - torrent_id: i64, - ) -> Result, sqlx::Error> { - query_as::<_, TorrentAnnounceUrlV2>( - "SELECT * FROM torrust_torrent_announce_urls WHERE torrent_id = ?", - ) - .bind(torrent_id) - .fetch_all(&self.pool) - .await + pub async fn get_torrent_announce_urls(&self, torrent_id: i64) -> Result, sqlx::Error> { + query_as::<_, TorrentAnnounceUrlV2>("SELECT * FROM torrust_torrent_announce_urls WHERE torrent_id = ?") + .bind(torrent_id) + .fetch_all(&self.pool) + .await } - pub async fn get_torrent_files( - &self, - torrent_id: i64, - ) -> Result, sqlx::Error> { + pub async fn get_torrent_files(&self, torrent_id: i64) -> Result, sqlx::Error> { query_as::<_, TorrentFileV2>("SELECT * FROM torrust_torrent_files WHERE torrent_id = ?") .bind(torrent_id) .fetch_all(&self.pool) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs index e8e79d54..897e7ccb 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs @@ -1,8 +1,10 @@ -use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; use std::sync::Arc; + use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::CategoryRecordV1; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + pub struct CategoryTester { source_database: Arc, destiny_database: Arc, @@ -14,10 +16,7 @@ pub struct TestData { } impl CategoryTester { - pub fn new( - source_database: Arc, - destiny_database: Arc, - ) -> Self { + pub fn new(source_database: Arc, destiny_database: Arc) -> Self { let category_01 = CategoryRecordV1 { category_id: 10, name: "category name 10".to_string(), @@ -47,21 +46,14 @@ impl CategoryTester { // Add test categories for categories in &self.test_data.categories { - self.source_database - .insert_category(&categories) - .await - .unwrap(); + self.source_database.insert_category(&categories).await.unwrap(); } } /// Table `torrust_categories` pub async fn assert_data_in_destiny_db(&self) { for categories in &self.test_data.categories { - let imported_category = self - .destiny_database - .get_category(categories.category_id) - .await - .unwrap(); + let imported_category = self.destiny_database.get_category(categories.category_id).await.unwrap(); assert_eq!(imported_category.category_id, categories.category_id); assert_eq!(imported_category.name, categories.name); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs index 9b4c8c2a..47be2c67 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs @@ -1,13 +1,13 @@ -use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; use std::sync::Arc; + use torrust_index_backend::models::torrent_file::Torrent; -use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{ - TorrentRecordV1, UserRecordV1, -}; +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::{TorrentRecordV1, UserRecordV1}; use 
torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::convert_timestamp_to_datetime; use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::read_torrent_from_file; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + pub struct TorrentTester { source_database: Arc, destiny_database: Arc, @@ -91,8 +91,7 @@ impl TorrentTester { self.assert_torrent(&torrent, &torrent_file).await; self.assert_torrent_info(&torrent).await; - self.assert_torrent_announce_urls(&torrent, &torrent_file) - .await; + self.assert_torrent_announce_urls(&torrent, &torrent_file).await; self.assert_torrent_files(&torrent, &torrent_file).await; } } @@ -103,11 +102,7 @@ impl TorrentTester { /// Table `torrust_torrents` async fn assert_torrent(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { - let imported_torrent = self - .destiny_database - .get_torrent(torrent.torrent_id) - .await - .unwrap(); + let imported_torrent = self.destiny_database.get_torrent(torrent.torrent_id).await.unwrap(); assert_eq!(imported_torrent.torrent_id, torrent.torrent_id); assert_eq!(imported_torrent.uploader_id, self.test_data.user.user_id); @@ -115,23 +110,14 @@ impl TorrentTester { assert_eq!(imported_torrent.info_hash, torrent.info_hash); assert_eq!(imported_torrent.size, torrent.file_size); assert_eq!(imported_torrent.name, torrent_file.info.name); - assert_eq!( - imported_torrent.pieces, - torrent_file.info.get_pieces_as_string() - ); - assert_eq!( - imported_torrent.piece_length, - torrent_file.info.piece_length - ); + assert_eq!(imported_torrent.pieces, torrent_file.info.get_pieces_as_string()); + assert_eq!(imported_torrent.piece_length, torrent_file.info.piece_length); if torrent_file.info.private.is_none() { assert_eq!(imported_torrent.private, Some(0)); } else { assert_eq!(imported_torrent.private, torrent_file.info.private); } - assert_eq!( - imported_torrent.root_hash, - torrent_file.info.get_root_hash_as_i64() - ); + assert_eq!(imported_torrent.root_hash, torrent_file.info.get_root_hash_as_i64()); assert_eq!( imported_torrent.date_uploaded, convert_timestamp_to_datetime(torrent.upload_date) @@ -140,11 +126,7 @@ impl TorrentTester { /// Table `torrust_torrent_info` async fn assert_torrent_info(&self, torrent: &TorrentRecordV1) { - let torrent_info = self - .destiny_database - .get_torrent_info(torrent.torrent_id) - .await - .unwrap(); + let torrent_info = self.destiny_database.get_torrent_info(torrent.torrent_id).await.unwrap(); assert_eq!(torrent_info.torrent_id, torrent.torrent_id); assert_eq!(torrent_info.title, torrent.title); @@ -152,11 +134,7 @@ impl TorrentTester { } /// Table `torrust_torrent_announce_urls` - async fn assert_torrent_announce_urls( - &self, - torrent: &TorrentRecordV1, - torrent_file: &Torrent, - ) { + async fn assert_torrent_announce_urls(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { let torrent_announce_urls = self .destiny_database .get_torrent_announce_urls(torrent.torrent_id) @@ -175,11 +153,7 @@ impl TorrentTester { /// Table `torrust_torrent_files` async fn assert_torrent_files(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { - let db_torrent_files = self - .destiny_database - .get_torrent_files(torrent.torrent_id) - .await - .unwrap(); + let db_torrent_files = self.destiny_database.get_torrent_files(torrent.torrent_id).await.unwrap(); if torrent_file.is_a_single_file_torrent() { let 
db_torrent_file = &db_torrent_files[0]; @@ -195,10 +169,7 @@ impl TorrentTester { let file_path = file.path.join("/"); // Find file in database - let db_torrent_file = db_torrent_files - .iter() - .find(|&f| f.path == Some(file_path.clone())) - .unwrap(); + let db_torrent_file = db_torrent_files.iter().find(|&f| f.path == Some(file_path.clone())).unwrap(); assert_eq!(db_torrent_file.torrent_id, torrent.torrent_id); assert!(db_torrent_file.md5sum.is_none()); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs index 3dfa4904..6ba44f5b 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs @@ -1,8 +1,10 @@ -use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; use std::sync::Arc; + use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::TrackerKeyRecordV1; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + pub struct TrackerKeyTester { source_database: Arc, destiny_database: Arc, @@ -14,11 +16,7 @@ pub struct TestData { } impl TrackerKeyTester { - pub fn new( - source_database: Arc, - destiny_database: Arc, - user_id: i64, - ) -> Self { + pub fn new(source_database: Arc, destiny_database: Arc, user_id: i64) -> Self { let tracker_key = TrackerKeyRecordV1 { key_id: 1, user_id, @@ -48,15 +46,9 @@ impl TrackerKeyTester { .await .unwrap(); - assert_eq!( - imported_key.tracker_key_id, - self.test_data.tracker_key.key_id - ); + assert_eq!(imported_key.tracker_key_id, self.test_data.tracker_key.key_id); assert_eq!(imported_key.user_id, self.test_data.tracker_key.user_id); assert_eq!(imported_key.tracker_key, self.test_data.tracker_key.key); - assert_eq!( - imported_key.date_expiry, - self.test_data.tracker_key.valid_until - ); + assert_eq!(imported_key.date_expiry, self.test_data.tracker_key.valid_until); } } diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs index d349a47f..870d7fa0 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs @@ -1,11 +1,13 @@ -use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; +use std::sync::Arc; + use argon2::password_hash::SaltString; use argon2::{Argon2, PasswordHasher}; use rand_core::OsRng; -use std::sync::Arc; use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::UserRecordV1; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; +use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; + pub struct UserTester { source_database: Arc, destiny_database: Arc, @@ -41,10 +43,7 @@ impl UserTester { } pub async fn load_data_into_source_db(&self) { - self.source_database - .insert_user(&self.test_data.user) - .await - .unwrap(); + self.source_database.insert_user(&self.test_data.user).await.unwrap(); } pub async fn assert_data_in_destiny_db(&self) { @@ -55,19 +54,12 @@ impl UserTester { /// Table `torrust_users` async fn assert_user(&self) { - let imported_user = self - 
.destiny_database - .get_user(self.test_data.user.user_id) - .await - .unwrap(); + let imported_user = self.destiny_database.get_user(self.test_data.user.user_id).await.unwrap(); assert_eq!(imported_user.user_id, self.test_data.user.user_id); assert!(imported_user.date_registered.is_none()); assert_eq!(imported_user.date_imported.unwrap(), self.execution_time); - assert_eq!( - imported_user.administrator, - self.test_data.user.administrator - ); + assert_eq!(imported_user.administrator, self.test_data.user.administrator); } /// Table `torrust_user_profiles` @@ -81,10 +73,7 @@ impl UserTester { assert_eq!(imported_user_profile.user_id, self.test_data.user.user_id); assert_eq!(imported_user_profile.username, self.test_data.user.username); assert_eq!(imported_user_profile.email, self.test_data.user.email); - assert_eq!( - imported_user_profile.email_verified, - self.test_data.user.email_verified - ); + assert_eq!(imported_user_profile.email_verified, self.test_data.user.email_verified); assert!(imported_user_profile.bio.is_none()); assert!(imported_user_profile.avatar.is_none()); } @@ -97,14 +86,8 @@ impl UserTester { .await .unwrap(); - assert_eq!( - imported_user_authentication.user_id, - self.test_data.user.user_id - ); - assert_eq!( - imported_user_authentication.password_hash, - self.test_data.user.password - ); + assert_eq!(imported_user_authentication.user_id, self.test_data.user.user_id); + assert_eq!(imported_user_authentication.password_hash, self.test_data.user.password); } } @@ -123,8 +106,5 @@ fn hash_password(plain_password: &str) -> String { let argon2 = Argon2::default(); // Hash password to PHC string ($argon2id$v=19$...) - argon2 - .hash_password(plain_password.as_bytes(), &salt) - .unwrap() - .to_string() + argon2.hash_password(plain_password.as_bytes(), &salt).unwrap().to_string() } diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index ee7ddc8f..63daee3a 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -11,17 +11,17 @@ //! ``` //! //! to see the "upgrader" command output. 
+use std::fs; +use std::sync::Arc; + +use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{datetime_iso_8601, upgrade, Arguments}; + use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::category_tester::CategoryTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::torrent_tester::TorrentTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::tracker_key_tester::TrackerKeyTester; use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_tester::UserTester; -use std::fs; -use std::sync::Arc; -use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{ - datetime_iso_8601, upgrade, Arguments, -}; struct TestConfig { // Directories @@ -59,11 +59,7 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { let category_tester = CategoryTester::new(source_db.clone(), dest_db.clone()); let user_tester = UserTester::new(source_db.clone(), dest_db.clone(), &execution_time); - let tracker_key_tester = TrackerKeyTester::new( - source_db.clone(), - dest_db.clone(), - user_tester.test_data.user.user_id, - ); + let tracker_key_tester = TrackerKeyTester::new(source_db.clone(), dest_db.clone(), user_tester.test_data.user.user_id); let torrent_tester = TorrentTester::new( source_db.clone(), dest_db.clone(), @@ -92,14 +88,10 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { category_tester.assert_data_in_destiny_db().await; user_tester.assert_data_in_destiny_db().await; tracker_key_tester.assert_data_in_destiny_db().await; - torrent_tester - .assert_data_in_destiny_db(&config.upload_path) - .await; + torrent_tester.assert_data_in_destiny_db(&config.upload_path).await; } -async fn setup_databases( - config: &TestConfig, -) -> (Arc, Arc) { +async fn setup_databases(config: &TestConfig) -> (Arc, Arc) { // Set up clean source database reset_databases(&config.source_database_file, &config.destiny_database_file); let source_database = source_db_connection(&config.source_database_file).await; diff --git a/tests/upgrades/mod.rs b/tests/upgrades/mod.rs index 736d54f6..e22b19a7 100644 --- a/tests/upgrades/mod.rs +++ b/tests/upgrades/mod.rs @@ -1 +1 @@ -pub mod from_v1_0_0_to_v2_0_0; \ No newline at end of file +pub mod from_v1_0_0_to_v2_0_0; From e8d984d790e4bc1f1c95498b10ad050bbed076be Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Nov 2022 12:34:16 +0000 Subject: [PATCH 51/53] refactor: [#56] rename destiny DB to target DB --- .../from_v1_0_0_to_v2_0_0/databases/mod.rs | 18 +++---- .../transferrers/category_transferrer.rs | 15 +++--- .../transferrers/torrent_transferrer.rs | 19 ++++--- .../transferrers/tracker_key_transferrer.rs | 6 +-- .../transferrers/user_transferrer.rs | 10 ++-- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 28 +++++------ .../testers/category_tester.rs | 10 ++-- .../testers/torrent_tester.rs | 16 +++--- .../testers/tracker_key_tester.rs | 10 ++-- .../testers/user_tester.rs | 14 +++--- .../from_v1_0_0_to_v2_0_0/upgrader.rs | 50 +++++++++++-------- upgrades/from_v1_0_0_to_v2_0_0/README.md | 2 +- 12 files changed, 105 insertions(+), 93 deletions(-) diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs index 5b3be9b1..936527ab 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/databases/mod.rs @@ -12,19 +12,19 @@ pub async fn current_db(db_filename: &str) -> Arc { } 
pub async fn new_db(db_filename: &str) -> Arc { - let dest_database_connect_url = format!("sqlite://{}?mode=rwc", db_filename); - Arc::new(SqliteDatabaseV2_0_0::new(&dest_database_connect_url).await) + let target_database_connect_url = format!("sqlite://{}?mode=rwc", db_filename); + Arc::new(SqliteDatabaseV2_0_0::new(&target_database_connect_url).await) } -pub async fn migrate_destiny_database(dest_database: Arc) { - println!("Running migrations in destiny database..."); - dest_database.migrate().await; +pub async fn migrate_target_database(target_database: Arc) { + println!("Running migrations in the target database..."); + target_database.migrate().await; } -pub async fn reset_destiny_database(dest_database: Arc) { - println!("Truncating all tables in destiny database ..."); - dest_database +pub async fn reset_target_database(target_database: Arc) { + println!("Truncating all tables in target database ..."); + target_database .delete_all_database_rows() .await - .expect("Can't reset destiny database."); + .expect("Can't reset the target database."); } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs index e95cfeda..f3d83d9b 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/category_transferrer.rs @@ -3,18 +3,18 @@ use std::sync::Arc; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::{CategoryRecordV2, SqliteDatabaseV2_0_0}; -pub async fn transfer_categories(source_database: Arc, dest_database: Arc) { +pub async fn transfer_categories(source_database: Arc, target_database: Arc) { println!("Transferring categories ..."); let source_categories = source_database.get_categories_order_by_id().await.unwrap(); println!("[v1] categories: {:?}", &source_categories); - let result = dest_database.reset_categories_sequence().await.unwrap(); + let result = target_database.reset_categories_sequence().await.unwrap(); println!("[v2] reset categories sequence result: {:?}", result); for cat in &source_categories { println!("[v2] adding category {:?} with id {:?} ...", &cat.name, &cat.category_id); - let id = dest_database + let id = target_database .insert_category(&CategoryRecordV2 { category_id: cat.category_id, name: cat.name.clone(), @@ -23,12 +23,15 @@ pub async fn transfer_categories(source_database: Arc, des .unwrap(); if id != cat.category_id { - panic!("Error copying category {:?} from source DB to destiny DB", &cat.category_id); + panic!( + "Error copying category {:?} from source DB to the target DB", + &cat.category_id + ); } println!("[v2] category: {:?} {:?} added.", id, &cat.name); } - let dest_categories = dest_database.get_categories().await.unwrap(); - println!("[v2] categories: {:?}", &dest_categories); + let target_categories = target_database.get_categories().await.unwrap(); + println!("[v2] categories: {:?}", &target_categories); } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs index dcaa867a..88a681f0 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/torrent_transferrer.rs @@ -8,7 +8,7 @@ use crate::utils::parse_torrent::decode_torrent; pub async fn transfer_torrents( 
source_database: Arc, - dest_database: Arc, + target_database: Arc, upload_path: &str, ) { println!("Transferring torrents ..."); @@ -47,13 +47,16 @@ pub async fn transfer_torrents( let torrent_from_file = torrent_from_file_result.unwrap(); - let id = dest_database + let id = target_database .insert_torrent(&TorrentRecordV2::from_v1_data(torrent, &torrent_from_file.info, &uploader)) .await .unwrap(); if id != torrent.torrent_id { - panic!("Error copying torrent {:?} from source DB to destiny DB", &torrent.torrent_id); + panic!( + "Error copying torrent {:?} from source DB to the target DB", + &torrent.torrent_id + ); } println!("[v2][torrust_torrents] torrent with id {:?} added.", &torrent.torrent_id); @@ -72,7 +75,7 @@ pub async fn transfer_torrents( &torrent_from_file.info.name, &torrent_from_file.info.length, ); - let file_id = dest_database + let file_id = target_database .insert_torrent_file_for_torrent_with_one_file( torrent.torrent_id, // TODO: it seems med5sum can be None. Why? When? @@ -95,7 +98,7 @@ pub async fn transfer_torrents( &file ); - let file_id = dest_database + let file_id = target_database .insert_torrent_file_for_torrent_with_multiple_files(torrent, file) .await; @@ -113,7 +116,7 @@ pub async fn transfer_torrents( &torrent.torrent_id ); - let id = dest_database.insert_torrent_info(torrent).await; + let id = target_database.insert_torrent_info(torrent).await; println!("[v2][torrust_torrents] torrent info insert result: {:?}.", &id); @@ -147,7 +150,7 @@ pub async fn transfer_torrents( &torrent.torrent_id ); - let announce_url_id = dest_database + let announce_url_id = target_database .insert_torrent_announce_url(torrent.torrent_id, tracker_url) .await; @@ -162,7 +165,7 @@ pub async fn transfer_torrents( &torrent.torrent_id ); - let announce_url_id = dest_database + let announce_url_id = target_database .insert_torrent_announce_url(torrent.torrent_id, &torrent_from_file.announce.unwrap()) .await; diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs index a2f3e753..51c451b0 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/tracker_key_transferrer.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteDatabaseV2_0_0; -pub async fn transfer_tracker_keys(source_database: Arc, dest_database: Arc) { +pub async fn transfer_tracker_keys(source_database: Arc, target_database: Arc) { println!("Transferring tracker keys ..."); // Transfer table `torrust_tracker_keys` @@ -18,7 +18,7 @@ pub async fn transfer_tracker_keys(source_database: Arc, d &tracker_key.key_id ); - let id = dest_database + let id = target_database .insert_tracker_key( tracker_key.key_id, tracker_key.user_id, @@ -30,7 +30,7 @@ pub async fn transfer_tracker_keys(source_database: Arc, d if id != tracker_key.key_id { panic!( - "Error copying tracker key {:?} from source DB to destiny DB", + "Error copying tracker key {:?} from source DB to the target DB", &tracker_key.key_id ); } diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs index 51d81727..76f5ff44 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs +++ 
b/src/upgrades/from_v1_0_0_to_v2_0_0/transferrers/user_transferrer.rs @@ -5,7 +5,7 @@ use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::sqlite_v2_0_0::SqliteData pub async fn transfer_users( source_database: Arc, - dest_database: Arc, + target_database: Arc, date_imported: &str, ) { println!("Transferring users ..."); @@ -22,13 +22,13 @@ pub async fn transfer_users( &user.username, &user.user_id ); - let id = dest_database + let id = target_database .insert_imported_user(user.user_id, date_imported, user.administrator) .await .unwrap(); if id != user.user_id { - panic!("Error copying user {:?} from source DB to destiny DB", &user.user_id); + panic!("Error copying user {:?} from source DB to the target DB", &user.user_id); } println!("[v2][torrust_users] user: {:?} {:?} added.", &user.user_id, &user.username); @@ -40,7 +40,7 @@ pub async fn transfer_users( &user.username, &user.user_id ); - dest_database + target_database .insert_user_profile(user.user_id, &user.username, &user.email, user.email_verified) .await .unwrap(); @@ -57,7 +57,7 @@ pub async fn transfer_users( &user.password, &user.user_id ); - dest_database + target_database .insert_user_password_hash(user.user_id, &user.password) .await .unwrap(); diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 53b17cb4..07accb78 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -17,7 +17,7 @@ use std::time::SystemTime; use chrono::prelude::{DateTime, Utc}; use text_colorizer::*; -use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::{current_db, migrate_destiny_database, new_db, reset_destiny_database}; +use crate::upgrades::from_v1_0_0_to_v2_0_0::databases::{current_db, migrate_target_database, new_db, reset_target_database}; use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::category_transferrer::transfer_categories; use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::transfer_torrents; use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::tracker_key_transferrer::transfer_tracker_keys; @@ -27,9 +27,9 @@ const NUMBER_OF_ARGUMENTS: i64 = 3; #[derive(Debug)] pub struct Arguments { - pub source_database_file: String, // The source database in version v1.0.0 we want to migrate - pub destiny_database_file: String, // The new migrated database in version v2.0.0 - pub upload_path: String, // The relative dir where torrent files are stored + pub source_database_file: String, // The source database in version v1.0.0 we want to migrate + pub target_database_file: String, // The new migrated database in version v2.0.0 + pub upload_path: String, // The relative dir where torrent files are stored } fn print_usage() { @@ -62,7 +62,7 @@ fn parse_args() -> Arguments { Arguments { source_database_file: args[0].clone(), - destiny_database_file: args[1].clone(), + target_database_file: args[1].clone(), upload_path: args[2].clone(), } } @@ -73,21 +73,21 @@ pub async fn run_upgrader() { } pub async fn upgrade(args: &Arguments, date_imported: &str) { - // Get connection to source database (current DB in settings) + // Get connection to the source database (current DB in settings) let source_database = current_db(&args.source_database_file).await; - // Get connection to destiny database - let dest_database = new_db(&args.destiny_database_file).await; + // Get connection to the target database (new DB we want to migrate the data) + let target_database = 
new_db(&args.target_database_file).await; println!("Upgrading data from version v1.0.0 to v2.0.0 ..."); - migrate_destiny_database(dest_database.clone()).await; - reset_destiny_database(dest_database.clone()).await; + migrate_target_database(target_database.clone()).await; + reset_target_database(target_database.clone()).await; - transfer_categories(source_database.clone(), dest_database.clone()).await; - transfer_users(source_database.clone(), dest_database.clone(), date_imported).await; - transfer_tracker_keys(source_database.clone(), dest_database.clone()).await; - transfer_torrents(source_database.clone(), dest_database.clone(), &args.upload_path).await; + transfer_categories(source_database.clone(), target_database.clone()).await; + transfer_users(source_database.clone(), target_database.clone(), date_imported).await; + transfer_tracker_keys(source_database.clone(), target_database.clone()).await; + transfer_torrents(source_database.clone(), target_database.clone(), &args.upload_path).await; } /// Current datetime in ISO8601 without time zone. diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs index 897e7ccb..c10f93b8 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs @@ -7,7 +7,7 @@ use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; pub struct CategoryTester { source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, - destiny_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, + target_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, test_data: TestData, } @@ -16,7 +16,7 @@ pub struct TestData { } impl CategoryTester { - pub fn new(source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, destiny_database: Arc&lt;SqliteDatabaseV2_0_0&gt;) -> Self { + pub fn new(source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, target_database: Arc&lt;SqliteDatabaseV2_0_0&gt;) -> Self { let category_01 = CategoryRecordV1 { category_id: 10, name: "category name 10".to_string(), }; @@ -28,7 +28,7 @@ impl CategoryTester { Self { source_database, - destiny_database, + target_database, test_data: TestData { categories: vec![category_01, category_02], }, @@ -51,9 +51,9 @@ impl CategoryTester { } /// Table `torrust_categories` - pub async fn assert_data_in_destiny_db(&self) { + pub async fn assert_data_in_target_db(&self) { for categories in &self.test_data.categories { - let imported_category = self.destiny_database.get_category(categories.category_id).await.unwrap(); + let imported_category = self.target_database.get_category(categories.category_id).await.unwrap(); assert_eq!(imported_category.category_id, categories.category_id); assert_eq!(imported_category.name, categories.name); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs index 47be2c67..86bd1e52 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs @@ -10,7 +10,7 @@ use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; pub struct TorrentTester { source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, - destiny_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, + target_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, test_data: TestData, } @@ -22,7 +22,7 @@ pub struct TestData { impl TorrentTester { pub fn new( source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, - destiny_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, + target_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, user: &UserRecordV1, category_id: i64, ) -> Self { @@ -69,7 +69,7 @@ impl TorrentTester { Self { source_database, - destiny_database, + target_database, test_data: TestData { torrents: vec![torrent_01, torrent_02], user: user.clone(), @@ -83,7 +83,7 @@ impl TorrentTester { } }
- pub async fn assert_data_in_destiny_db(&self, upload_path: &str) { + pub async fn assert_data_in_target_db(&self, upload_path: &str) { for torrent in &self.test_data.torrents { let filepath = self.torrent_file_path(upload_path, torrent.torrent_id); @@ -102,7 +102,7 @@ impl TorrentTester { /// Table `torrust_torrents` async fn assert_torrent(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { - let imported_torrent = self.destiny_database.get_torrent(torrent.torrent_id).await.unwrap(); + let imported_torrent = self.target_database.get_torrent(torrent.torrent_id).await.unwrap(); assert_eq!(imported_torrent.torrent_id, torrent.torrent_id); assert_eq!(imported_torrent.uploader_id, self.test_data.user.user_id); @@ -126,7 +126,7 @@ impl TorrentTester { /// Table `torrust_torrent_info` async fn assert_torrent_info(&self, torrent: &TorrentRecordV1) { - let torrent_info = self.destiny_database.get_torrent_info(torrent.torrent_id).await.unwrap(); + let torrent_info = self.target_database.get_torrent_info(torrent.torrent_id).await.unwrap(); assert_eq!(torrent_info.torrent_id, torrent.torrent_id); assert_eq!(torrent_info.title, torrent.title); @@ -136,7 +136,7 @@ impl TorrentTester { /// Table `torrust_torrent_announce_urls` async fn assert_torrent_announce_urls(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { let torrent_announce_urls = self - .destiny_database + .target_database .get_torrent_announce_urls(torrent.torrent_id) .await .unwrap(); @@ -153,7 +153,7 @@ impl TorrentTester { /// Table `torrust_torrent_files` async fn assert_torrent_files(&self, torrent: &TorrentRecordV1, torrent_file: &Torrent) { - let db_torrent_files = self.destiny_database.get_torrent_files(torrent.torrent_id).await.unwrap(); + let db_torrent_files = self.target_database.get_torrent_files(torrent.torrent_id).await.unwrap(); if torrent_file.is_a_single_file_torrent() { let db_torrent_file = &db_torrent_files[0]; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs index 6ba44f5b..e50ac861 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs @@ -7,7 +7,7 @@ use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; pub struct TrackerKeyTester { source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, - destiny_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, + target_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, test_data: TestData, } @@ -16,7 +16,7 @@ pub struct TestData { } impl TrackerKeyTester { - pub fn new(source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, destiny_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, user_id: i64) -> Self { + pub fn new(source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, target_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, user_id: i64) -> Self { let tracker_key = TrackerKeyRecordV1 { key_id: 1, user_id, }; @@ -26,7 +26,7 @@ impl TrackerKeyTester { Self { source_database, - destiny_database, + target_database, test_data: TestData { tracker_key }, } } @@ -39,9 +39,9 @@ impl TrackerKeyTester { } /// Table `torrust_tracker_keys` - pub async fn assert_data_in_destiny_db(&self) { + pub async fn assert_data_in_target_db(&self) { let imported_key = self - .destiny_database + .target_database .get_tracker_key(self.test_data.tracker_key.key_id) .await .unwrap(); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs index 870d7fa0..2d52a683 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs @@ -10,7 +10,7 @@ use
crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; pub struct UserTester { source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, - destiny_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, + target_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, execution_time: String, pub test_data: TestData, } @@ -22,7 +22,7 @@ pub struct TestData { impl UserTester { pub fn new( source_database: Arc&lt;SqliteDatabaseV1_0_0&gt;, - destiny_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, + target_database: Arc&lt;SqliteDatabaseV2_0_0&gt;, execution_time: &str, ) -> Self { let user = UserRecordV1 { @@ -36,7 +36,7 @@ impl UserTester { Self { source_database, - destiny_database, + target_database, execution_time: execution_time.to_owned(), test_data: TestData { user }, } } @@ -46,7 +46,7 @@ impl UserTester { self.source_database.insert_user(&self.test_data.user).await.unwrap(); } - pub async fn assert_data_in_destiny_db(&self) { + pub async fn assert_data_in_target_db(&self) { self.assert_user().await; self.assert_user_profile().await; self.assert_user_authentication().await; @@ -54,7 +54,7 @@ impl UserTester { /// Table `torrust_users` async fn assert_user(&self) { - let imported_user = self.destiny_database.get_user(self.test_data.user.user_id).await.unwrap(); + let imported_user = self.target_database.get_user(self.test_data.user.user_id).await.unwrap(); assert_eq!(imported_user.user_id, self.test_data.user.user_id); assert!(imported_user.date_registered.is_none()); @@ -65,7 +65,7 @@ impl UserTester { /// Table `torrust_user_profiles` async fn assert_user_profile(&self) { let imported_user_profile = self - .destiny_database + .target_database .get_user_profile(self.test_data.user.user_id) .await .unwrap(); @@ -81,7 +81,7 @@ impl UserTester { /// Table `torrust_user_authentication` async fn assert_user_authentication(&self) { let imported_user_authentication = self - .destiny_database + .target_database .get_user_authentication(self.test_data.user.user_id) .await .unwrap(); diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 63daee3a..aa0e2a75 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -12,6 +12,7 @@ //! //! to see the "upgrader" command output.
use std::fs; +use std::path::Path; use std::sync::Arc; use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{datetime_iso_8601, upgrade, Arguments}; @@ -29,7 +30,7 @@ struct TestConfig { pub upload_path: String, // Files pub source_database_file: String, - pub destiny_database_file: String, + pub target_database_file: String, } impl Default for TestConfig { @@ -38,12 +39,12 @@ impl Default for TestConfig { let upload_path = format!("{}uploads/", &fixtures_dir); let output_dir = "./tests/upgrades/from_v1_0_0_to_v2_0_0/output/".to_string(); let source_database_file = format!("{}source.db", output_dir); - let destiny_database_file = format!("{}destiny.db", output_dir); + let target_database_file = format!("{}target.db", output_dir); Self { fixtures_dir, upload_path, source_database_file, - destiny_database_file, + target_database_file, } } } @@ -52,17 +53,17 @@ impl Default for TestConfig { async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { let config = TestConfig::default(); - let (source_db, dest_db) = setup_databases(&config).await; + let (source_db, target_db) = setup_databases(&config).await; // The datetime when the upgrader is executed let execution_time = datetime_iso_8601(); - let category_tester = CategoryTester::new(source_db.clone(), dest_db.clone()); - let user_tester = UserTester::new(source_db.clone(), dest_db.clone(), &execution_time); - let tracker_key_tester = TrackerKeyTester::new(source_db.clone(), dest_db.clone(), user_tester.test_data.user.user_id); + let category_tester = CategoryTester::new(source_db.clone(), target_db.clone()); + let user_tester = UserTester::new(source_db.clone(), target_db.clone(), &execution_time); + let tracker_key_tester = TrackerKeyTester::new(source_db.clone(), target_db.clone(), user_tester.test_data.user.user_id); let torrent_tester = TorrentTester::new( source_db.clone(), - dest_db.clone(), + target_db.clone(), &user_tester.test_data.user, category_tester.get_valid_category_id(), ); @@ -77,7 +78,7 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { upgrade( &Arguments { source_database_file: config.source_database_file.clone(), - destiny_database_file: config.destiny_database_file.clone(), + target_database_file: config.target_database_file.clone(), upload_path: config.upload_path.clone(), }, &execution_time, @@ -85,34 +86,39 @@ async fn upgrades_data_from_version_v1_0_0_to_v2_0_0() { .await; // Assertions for data transferred to the new database in version v2.0.0 - category_tester.assert_data_in_destiny_db().await; - user_tester.assert_data_in_destiny_db().await; - tracker_key_tester.assert_data_in_destiny_db().await; - torrent_tester.assert_data_in_destiny_db(&config.upload_path).await; + category_tester.assert_data_in_target_db().await; + user_tester.assert_data_in_target_db().await; + tracker_key_tester.assert_data_in_target_db().await; + torrent_tester.assert_data_in_target_db(&config.upload_path).await; } async fn setup_databases(config: &TestConfig) -> (Arc, Arc) { // Set up clean source database - reset_databases(&config.source_database_file, &config.destiny_database_file); + reset_databases(&config.source_database_file, &config.target_database_file); let source_database = source_db_connection(&config.source_database_file).await; source_database.migrate(&config.fixtures_dir).await; - // Set up connection for the destiny database - let destiny_database = destiny_db_connection(&config.destiny_database_file).await; + // Set up connection for the target database + let target_database = 
target_db_connection(&config.target_database_file).await; - (source_database, destiny_database) + (source_database, target_database) } async fn source_db_connection(source_database_file: &str) -> Arc&lt;SqliteDatabaseV1_0_0&gt; { Arc::new(SqliteDatabaseV1_0_0::db_connection(&source_database_file).await) } -async fn destiny_db_connection(destiny_database_file: &str) -> Arc&lt;SqliteDatabaseV2_0_0&gt; { - Arc::new(SqliteDatabaseV2_0_0::db_connection(&destiny_database_file).await) +async fn target_db_connection(target_database_file: &str) -> Arc&lt;SqliteDatabaseV2_0_0&gt; { + Arc::new(SqliteDatabaseV2_0_0::db_connection(&target_database_file).await) } /// Reset databases from previous executions -fn reset_databases(source_database_file: &str, destiny_database_file: &str) { - fs::remove_file(&source_database_file).expect("Can't remove source DB file."); - fs::remove_file(&destiny_database_file).expect("Can't remove destiny DB file."); +fn reset_databases(source_database_file: &str, target_database_file: &str) { + if Path::new(source_database_file).exists() { + fs::remove_file(&source_database_file).expect("Can't remove the source DB file."); + } + + if Path::new(target_database_file).exists() { + fs::remove_file(&target_database_file).expect("Can't remove the target DB file."); + } } diff --git a/upgrades/from_v1_0_0_to_v2_0_0/README.md b/upgrades/from_v1_0_0_to_v2_0_0/README.md index cd2c1c11..37609149 100644 --- a/upgrades/from_v1_0_0_to_v2_0_0/README.md +++ b/upgrades/from_v1_0_0_to_v2_0_0/README.md @@ -31,4 +31,4 @@ Before replacing the DB in production you can make some tests like: ## Notes -The `db_schemas` contains the snapshots of the source and destiny databases for this upgrade. +The `db_schemas` directory contains the schema snapshots of the source and target databases for this upgrade. From 19d054e5f8b72c3218e9b16b77fa215c110b9c13 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Nov 2022 12:48:30 +0000 Subject: [PATCH 52/53] refactor: [#56] rename test mods to follow prod mods --- tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs | 2 +- tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs | 4 ---- .../category_transferrer_tester.rs} | 0 .../from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs | 4 ++++ .../torrent_transferrer_tester.rs} | 0 .../tracker_key_transferrer_tester.rs} | 0 .../user_transferrer_tester.rs} | 0 tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs | 8 ++++---- 8 files changed, 9 insertions(+), 9 deletions(-) delete mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs rename tests/upgrades/from_v1_0_0_to_v2_0_0/{testers/category_tester.rs => transferrer_testers/category_transferrer_tester.rs} (100%) create mode 100644 tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs rename tests/upgrades/from_v1_0_0_to_v2_0_0/{testers/torrent_tester.rs => transferrer_testers/torrent_transferrer_tester.rs} (100%) rename tests/upgrades/from_v1_0_0_to_v2_0_0/{testers/tracker_key_tester.rs => transferrer_testers/tracker_key_transferrer_tester.rs} (100%) rename tests/upgrades/from_v1_0_0_to_v2_0_0/{testers/user_tester.rs => transferrer_testers/user_transferrer_tester.rs} (100%) diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs index 7a5e3bb7..29897ff7 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/mod.rs @@ -1,4 +1,4 @@ pub mod sqlite_v1_0_0; pub mod sqlite_v2_0_0; -pub mod testers; +pub mod transferrer_testers; pub mod upgrader; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs deleted
file mode 100644 index 36629cc3..00000000 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod category_tester; -pub mod torrent_tester; -pub mod tracker_key_tester; -pub mod user_tester; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/category_transferrer_tester.rs similarity index 100% rename from tests/upgrades/from_v1_0_0_to_v2_0_0/testers/category_tester.rs rename to tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/category_transferrer_tester.rs diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs new file mode 100644 index 00000000..459bcac8 --- /dev/null +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/mod.rs @@ -0,0 +1,4 @@ +pub mod category_transferrer_tester; +pub mod torrent_transferrer_tester; +pub mod tracker_key_transferrer_tester; +pub mod user_transferrer_tester; diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/torrent_transferrer_tester.rs similarity index 100% rename from tests/upgrades/from_v1_0_0_to_v2_0_0/testers/torrent_tester.rs rename to tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/torrent_transferrer_tester.rs diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/tracker_key_transferrer_tester.rs similarity index 100% rename from tests/upgrades/from_v1_0_0_to_v2_0_0/testers/tracker_key_tester.rs rename to tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/tracker_key_transferrer_tester.rs diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/user_transferrer_tester.rs similarity index 100% rename from tests/upgrades/from_v1_0_0_to_v2_0_0/testers/user_tester.rs rename to tests/upgrades/from_v1_0_0_to_v2_0_0/transferrer_testers/user_transferrer_tester.rs diff --git a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index aa0e2a75..9e207b22 100644 --- a/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/tests/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -19,10 +19,10 @@ use torrust_index_backend::upgrades::from_v1_0_0_to_v2_0_0::upgrader::{datetime_ use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v1_0_0::SqliteDatabaseV1_0_0; use crate::upgrades::from_v1_0_0_to_v2_0_0::sqlite_v2_0_0::SqliteDatabaseV2_0_0; -use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::category_tester::CategoryTester; -use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::torrent_tester::TorrentTester; -use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::tracker_key_tester::TrackerKeyTester; -use crate::upgrades::from_v1_0_0_to_v2_0_0::testers::user_tester::UserTester; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::category_transferrer_tester::CategoryTester; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::torrent_transferrer_tester::TorrentTester; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::tracker_key_transferrer_tester::TrackerKeyTester; +use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrer_testers::user_transferrer_tester::UserTester; struct TestConfig { // Directories From 5a7d87517cbaa8867a0a7f35a0a8a288205c361a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 
Nov 2022 13:54:14 +0000 Subject: [PATCH 53/53] feat: [#56] console command to import tracker stats for all torrents --- src/bin/import_tracker_statistics.rs | 10 +++ .../commands/import_tracker_statistics.rs | 86 +++++++++++++++++++ src/console/commands/mod.rs | 1 + src/console/mod.rs | 1 + src/lib.rs | 1 + .../from_v1_0_0_to_v2_0_0/upgrader.rs | 14 ++- 6 files changed, 111 insertions(+), 2 deletions(-) create mode 100644 src/bin/import_tracker_statistics.rs create mode 100644 src/console/commands/import_tracker_statistics.rs create mode 100644 src/console/commands/mod.rs create mode 100644 src/console/mod.rs diff --git a/src/bin/import_tracker_statistics.rs b/src/bin/import_tracker_statistics.rs new file mode 100644 index 00000000..3f8456c4 --- /dev/null +++ b/src/bin/import_tracker_statistics.rs @@ -0,0 +1,10 @@ +//! Import Tracker Statistics command. +//! It imports the number of seeders and leechers for all torrents from the linked tracker. +//! You can execute it with: `cargo run --bin import_tracker_statistics` + +use torrust_index_backend::console::commands::import_tracker_statistics::run_importer; + +#[actix_web::main] +async fn main() { + run_importer().await; +} diff --git a/src/console/commands/import_tracker_statistics.rs b/src/console/commands/import_tracker_statistics.rs new file mode 100644 index 00000000..f5dba839 --- /dev/null +++ b/src/console/commands/import_tracker_statistics.rs @@ -0,0 +1,86 @@ +//! It imports statistics for all torrents from the linked tracker. + +use std::env; +use std::sync::Arc; + +use derive_more::{Display, Error}; +use text_colorizer::*; + +use crate::config::Configuration; +use crate::databases::database::connect_database; +use crate::tracker::TrackerService; + +const NUMBER_OF_ARGUMENTS: usize = 0; + +#[derive(Debug)] +pub struct Arguments {} + +#[derive(Debug, Display, PartialEq, Error)] +#[allow(dead_code)] +pub enum ImportError { + #[display(fmt = "internal server error")] + WrongNumberOfArgumentsError, +} + +fn parse_args() -> Result&lt;Arguments, ImportError&gt; { + let args: Vec&lt;String&gt; = env::args().skip(1).collect(); + + if args.len() != NUMBER_OF_ARGUMENTS { + eprintln!( + "{} wrong number of arguments: expected {}, got {}", + "Error".red().bold(), + NUMBER_OF_ARGUMENTS, + args.len() + ); + print_usage(); + return Err(ImportError::WrongNumberOfArgumentsError); + } + + Ok(Arguments {}) +} + +fn print_usage() { + eprintln!( + "{} - imports torrent statistics from the linked tracker.
+ + cargo run --bin import_tracker_statistics + + For example: + + cargo run --bin import_tracker_statistics + + ", "Importer".green() ); } + +pub async fn run_importer() { + import(&parse_args().unwrap()).await; +} + +pub async fn import(_args: &Arguments) { + println!("Importing statistics from the linked tracker ..."); + + let cfg = match Configuration::load_from_file().await { + Ok(config) => Arc::new(config), + Err(error) => { + panic!("{}", error) + } + }; + + let settings = cfg.settings.read().await; + + let tracker_url = settings.tracker.url.clone(); + + eprintln!("Tracker url: {}", tracker_url.green()); + + let database = Arc::new( + connect_database(&settings.database.connect_url) + .await + .expect("Database error."), + ); + + let tracker_service = Arc::new(TrackerService::new(cfg.clone(), database.clone())); + + tracker_service.update_torrents().await.unwrap(); +} diff --git a/src/console/commands/mod.rs b/src/console/commands/mod.rs new file mode 100644 index 00000000..6dad4966 --- /dev/null +++ b/src/console/commands/mod.rs @@ -0,0 +1 @@ +pub mod import_tracker_statistics; diff --git a/src/console/mod.rs b/src/console/mod.rs new file mode 100644 index 00000000..82b6da3c --- /dev/null +++ b/src/console/mod.rs @@ -0,0 +1 @@ +pub mod commands; diff --git a/src/lib.rs b/src/lib.rs index d7ef0d09..0d2cc49e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,7 @@ pub mod auth; pub mod common; pub mod config; +pub mod console; pub mod databases; pub mod errors; pub mod mailer; diff --git a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs index 07accb78..0e18d417 100644 --- a/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs +++ b/src/upgrades/from_v1_0_0_to_v2_0_0/upgrader.rs @@ -23,7 +23,7 @@ use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::torrent_transferrer::t use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::tracker_key_transferrer::transfer_tracker_keys; use crate::upgrades::from_v1_0_0_to_v2_0_0::transferrers::user_transferrer::transfer_users; -const NUMBER_OF_ARGUMENTS: i64 = 3; +const NUMBER_OF_ARGUMENTS: usize = 3; #[derive(Debug)] pub struct Arguments { @@ -50,7 +50,7 @@ fn print_usage() { fn parse_args() -> Arguments { let args: Vec&lt;String&gt; = env::args().skip(1).collect(); - if args.len() != 3 { + if args.len() != NUMBER_OF_ARGUMENTS { eprintln!( "{} wrong number of arguments: expected {}, got {}", "Error".red().bold(), @@ -88,6 +88,16 @@ pub async fn upgrade(args: &Arguments, date_imported: &str) { transfer_users(source_database.clone(), target_database.clone(), date_imported).await; transfer_tracker_keys(source_database.clone(), target_database.clone()).await; transfer_torrents(source_database.clone(), target_database.clone(), &args.upload_path).await; + + println!("Upgrade from version v1.0.0 to v2.0.0 finished!\n"); + + eprintln!( + "{}\nWe recommend running the command that imports torrent statistics for all torrents manually. \ + If you do not, the statistics will be imported anyway during the normal execution of the program. \ + You can import statistics manually with:\n {}", + "SUGGESTION: \n".yellow(), + "cargo run --bin import_tracker_statistics".yellow() + ); +} /// Current datetime in ISO8601 without time zone.
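The suggestion printed by the upgrader ties the two console commands together: first migrate the data, then import the tracker statistics. As a quick reference, the whole sequence looks like the sketch below. It assumes the upgrader binary is invoked as `cargo run --bin upgrade` with the three arguments defined in its `Arguments` struct (source database file, target database file, upload dir), and the file paths are hypothetical; adjust them to your installation:

```s
# 1. Migrate the v1.0.0 database, and the torrent files in the uploads
#    dir, into a new empty v2.0.0 database.
cargo run --bin upgrade ./data.db ./data_v2.db ./uploads

# 2. Import the number of seeders and leechers for all torrents from the
#    linked tracker. If you skip this step, the statistics are imported
#    anyway during the normal execution of the program.
cargo run --bin import_tracker_statistics
```

The importer takes no arguments on purpose: as the `import` function above shows, it reads the tracker URL and the database connection string from the configuration file, so `parse_args` only has to verify that the argument list is empty.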