Skip to content

Commit

Permalink
Merge pull request #38 from torrust/revert-34-development
Browse files (browse the repository at this point in the history)
Revert "Development"
  • Loading branch information
mickvandijke authored Apr 25, 2022
2 parents f331c08 + 5d1ad9d commit 4cf4e95
Show file tree
Hide file tree
Showing 4 changed files with 11 additions and 36 deletions.
2 changes: 0 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,6 @@ Torrust Tracker is a lightweight but incredibly powerful and feature-rich BitTor
* [X] Peer authentication using time-bound keys
* [X] newTrackon check supported for both HTTP, UDP, where IPv4 and IPv6 is properly handled
* [X] SQLite3 Persistent loading and saving of the torrent hashes and completed count
* [X] MySQL support added as engine option
* [X] Periodically saving added, interval can be configured

### Implemented BEPs
* [BEP 3](https://www.bittorrent.org/beps/bep_0003.html): The BitTorrent Protocol
Expand Down
18 changes: 2 additions & 16 deletions src/mysql_database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,26 +80,12 @@ impl Database for MysqlDatabase {

let mut db_transaction = conn.start_transaction(TxOpts::default()).map_err(|_| database::Error::DatabaseError)?;

let mut insert_vector= vec![];

for (info_hash, torrent_entry) in torrents {
let (_seeders, completed, _leechers) = torrent_entry.get_stats();
insert_vector.push(format!("(UNHEX('{}'), {})", info_hash.to_string(), completed.to_string()));
if insert_vector.len() == 1000 {
let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(","));
if db_transaction.query_drop(query).is_err() {
return Err(Error::InvalidQuery);
}
insert_vector.clear();
}
}

if insert_vector.len() != 0 {
let query = format!("INSERT INTO torrents (info_hash, completed) VALUES {} ON DUPLICATE KEY UPDATE completed = VALUES(completed)", insert_vector.join(","));
if db_transaction.query_drop(query).is_err() {
if db_transaction.exec_drop("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX(?), ?) ON DUPLICATE KEY UPDATE completed = completed", (info_hash.to_string(), completed.to_string())).is_err() {
return Err(Error::InvalidQuery);
}
insert_vector.clear();
debug!("INSERT INTO torrents (info_hash, completed) VALUES (UNHEX('{}'), {}) ON DUPLICATE KEY UPDATE completed = completed", info_hash.to_string(), completed.to_string());
}

if db_transaction.commit().is_err() {
Expand Down
3 changes: 1 addition & 2 deletions src/sqlite_database.rs
Original file line number Diff line number Diff line change
Expand Up @@ -82,8 +82,7 @@ impl Database for SqliteDatabase {

for (info_hash, torrent_entry) in torrents {
let (_seeders, completed, _leechers) = torrent_entry.get_stats();
let _ = db_transaction.execute("INSERT OR IGNORE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]);
let _ = db_transaction.execute("UPDATE torrents SET completed = ? WHERE info_hash = ?", &[completed.to_string(), info_hash.to_string()]);
let _ = db_transaction.execute("INSERT OR REPLACE INTO torrents (info_hash, completed) VALUES (?, ?)", &[info_hash.to_string(), completed.to_string()]);
}

let _ = db_transaction.commit();
Expand Down
24 changes: 8 additions & 16 deletions src/tracker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use std::collections::BTreeMap;
use std::net::SocketAddr;
use std::sync::Arc;

use log::{debug, info};
use log::info;
use serde::{Deserialize, Serialize};
use serde;
use tokio::sync::{RwLock, RwLockReadGuard};
Expand Down Expand Up @@ -128,7 +128,6 @@ impl TorrentTracker {
let torrents = self.database.load_persistent_torrent_data().await?;

for torrent in torrents {
debug!("{:#?}", torrent);
let _ = self.add_torrent(torrent.0, 0, torrent.1, 0).await;
}

Expand Down Expand Up @@ -308,48 +307,41 @@ impl TorrentTracker {
let mut updates = self.updates.write().await;
let mut updates_cloned: std::collections::HashMap<InfoHash, u32> = std::collections::HashMap::new();
// let mut torrent_hashes: Vec<InfoHash> = Vec::new();
info!("Copying updates to updates_cloned...");
for (k, completed) in updates.iter() {
updates_cloned.insert(k.clone(), completed.clone());
updates_cloned.insert(*k, *completed);
}
updates.clear();
drop(updates);

info!("Copying updates_cloned into the shadow to overwrite...");
let mut shadows = self.shadow.write().await;
for (k, completed) in updates_cloned.iter() {
let mut shadows = self.shadow.write().await;
if shadows.contains_key(k) {
shadows.remove(k);
}
shadows.insert(k.clone(), completed.clone());
drop(shadows);
shadows.insert(*k, *completed);
}
drop(updates_cloned);

// We updated the shadow data from the updates data, let's handle shadow data as expected.
info!("Handle shadow_copy to be updated into SQL...");
let mut shadow_copy: BTreeMap<InfoHash, TorrentEntry> = BTreeMap::new();
let shadows = self.shadow.read().await;
for (infohash, completed) in shadows.iter() {
shadow_copy.insert(infohash.clone(), TorrentEntry {
shadow_copy.insert(*infohash, TorrentEntry {
peers: Default::default(),
completed: completed.clone(),
completed: *completed,
seeders: 0,
});
}

// Drop the lock
drop(shadows);

// We will now save the data from the shadow into the database.
// This should not put any strain on the server itself, other than the hard disk/SSD.
info!("Start saving shadow data into SQL...");
let result = self.database.save_persistent_torrent_data(&shadow_copy).await;
if result.is_ok() {
info!("Done saving data to SQL and succeeded, emptying shadow...");
let mut shadow = self.shadow.write().await;
shadow.clear();
drop(shadow);
} else {
info!("Done saving data to SQL and failed, not emptying shadow...");
}
}
}

0 comments on commit 4cf4e95

Please sign in to comment.