diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index d1602c012e4..599f6406afa 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -100,7 +100,7 @@ jobs: if: runner.os == 'Windows' shell: cmd run: | - git clone --depth=1 --recurse-submodules -j10 --branch=boost-1.72.0 https://github.com/boostorg/boost.git + git clone --depth=1 --recurse-submodules -j10 --branch=boost-1.78.0 https://github.com/boostorg/boost.git cd boost bootstrap.bat @@ -115,7 +115,8 @@ jobs: if: runner.os == 'Windows' uses: nick-invision/retry@v2 with: - timeout_minutes: 30 + shell: cmd + timeout_minutes: 5 retry_wait_seconds: 4 max_attempts: 3 command: choco install openssl --limitoutput --no-progress diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 71822b030ac..4935e0bf7ad 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -47,7 +47,7 @@ jobs: - name: install boost run: | - git clone --depth=1 --recurse-submodules -j10 --branch=boost-1.72.0 https://github.com/boostorg/boost.git + git clone --depth=1 --recurse-submodules -j10 --branch=boost-1.78.0 https://github.com/boostorg/boost.git cd boost bootstrap.bat @@ -92,7 +92,7 @@ jobs: - name: install boost run: | - git clone --depth=1 --recurse-submodules -j10 --branch=boost-1.72.0 https://github.com/boostorg/boost.git + git clone --depth=1 --recurse-submodules -j10 --branch=boost-1.78.0 https://github.com/boostorg/boost.git cd boost bootstrap.bat @@ -141,7 +141,7 @@ jobs: - name: install boost run: | - git clone --depth=1 --recurse-submodules -j10 --branch=boost-1.72.0 https://github.com/boostorg/boost.git + git clone --depth=1 --recurse-submodules -j10 --branch=boost-1.78.0 https://github.com/boostorg/boost.git cd boost bootstrap.bat diff --git a/CMakeLists.txt b/CMakeLists.txt index 48b6faa7842..45e80e97ddc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -89,6 +89,7 @@ set(libtorrent_include_files torrent_info.hpp torrent_status.hpp tracker_event.hpp + truncate.hpp units.hpp upnp.hpp version.hpp @@ -303,6 +304,7 @@ set(sources chained_buffer.cpp choker.cpp close_reason.cpp + copy_file.cpp cpuid.cpp crc32c.cpp create_torrent.cpp @@ -404,6 +406,7 @@ set(sources torrent_peer_allocator.cpp torrent_status.cpp tracker_manager.cpp + truncate.cpp udp_socket.cpp udp_tracker_connection.cpp upnp.cpp diff --git a/ChangeLog b/ChangeLog index 09ea2f00d30..3b00bad19d9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -5,6 +5,12 @@ * added support for WebTorrent + * fix issue creating a v2 torrent from torrent_info containing an empty file + * make recheck files also update which files use partfile + * add write_through disk_io_write_mode, which flushes pieces to disk immediately + * improve copy file function to preserve sparse regions (when supported) + * add function to truncate over-sized files part of a torrent + * fix directory creation on windows shared folders * add flag to make add_files() not record file attributes * deprecate (unused) allow_partial_disk_writes settings * fix disk-full error reporting in mmap_disk_io @@ -103,6 +109,7 @@ * added support for GnuTLS for HTTPS and torrents over SSL + * send User-Agent field in anonymous mode * fix python binding for settings_pack conversion * fix DHT announce timer issue * use DSCP_TRAFFIC_TYPE socket option on windows diff --git a/Jamfile b/Jamfile index 8a23fa0454e..66fd52a5b6f 100644 --- a/Jamfile +++ b/Jamfile @@ -165,9 +165,15 @@ rule linking ( 
properties * ) { # for backtraces in assertion failures # which only works on ELF targets with gcc - result += -Wl,-export-dynamic -rdynamic ; + result += -Wl,--export-dynamic -rdynamic ; result += global ; } + else + { + # backtraces don't work with visibility=hidden, so we only add that in + # the else-block + result += hidden ; + } local BOOST_VERSION_TAG = [ modules.peek boostcpp : BOOST_VERSION_TAG ] ; local json ; @@ -339,9 +345,9 @@ rule building ( properties * ) if msvc in $(properties) || intel-win in $(properties) { # allow larger .obj files (with more sections) - result += /bigobj /bigobj ; + result += /bigobj ; # https://docs.microsoft.com/en-us/cpp/build/reference/utf-8-set-source-and-executable-character-sets-to-utf-8?view=msvc-170 - result += /utf-8 ; + result += /utf-8 ; } if gcc in $(properties) && windows in $(properties) @@ -731,6 +737,7 @@ SOURCES = chained_buffer choker close_reason + copy_file cpuid crc32c create_torrent @@ -843,6 +850,7 @@ SOURCES = posix_part_file posix_storage ssl + truncate rtc_signaling rtc_stream websocket_stream @@ -945,7 +953,6 @@ lib torrent multi 17 512 - hidden : # usage requirements $(usage-requirements) diff --git a/Makefile b/Makefile index c4184a3fd89..003d3e3a592 100644 --- a/Makefile +++ b/Makefile @@ -307,6 +307,7 @@ SOURCES = \ chained_buffer.cpp \ choker.cpp \ close_reason.cpp \ + copy_file.cpp \ cpuid.cpp \ crc32c.cpp \ create_torrent.cpp \ @@ -408,6 +409,7 @@ SOURCES = \ torrent_peer_allocator.cpp \ torrent_status.cpp \ tracker_manager.cpp \ + truncate.cpp \ udp_socket.cpp \ udp_tracker_connection.cpp \ upnp.cpp \ @@ -503,6 +505,7 @@ HEADERS = \ torrent_info.hpp \ torrent_status.hpp \ tracker_event.hpp \ + truncate.hpp \ units.hpp \ upnp.hpp \ version.hpp \ @@ -853,6 +856,7 @@ TEST_SOURCES = \ test_bloom_filter.cpp \ test_buffer.cpp \ test_checking.cpp \ + test_copy_file.cpp \ test_crc32.cpp \ test_create_torrent.cpp \ test_dht.cpp \ @@ -930,6 +934,7 @@ TEST_SOURCES = \ test_torrent_list.cpp \ test_tracker.cpp \ test_tracker_manager.cpp \ + test_truncate.cpp \ test_transfer.cpp \ test_upnp.cpp \ test_url_seed.cpp \ diff --git a/bindings/c/include/libtorrent_alerts.h b/bindings/c/include/libtorrent_alerts.h index 5e0bdf7eb50..38740b02b40 100644 --- a/bindings/c/include/libtorrent_alerts.h +++ b/bindings/c/include/libtorrent_alerts.h @@ -97,6 +97,7 @@ enum alert_types_t { ALERT_ALERTS_DROPPED = 95, ALERT_SOCKS5 = 96, ALERT_FILE_PRIO = 97, + ALERT_OVERSIZED_FILE = 98, }; #endif // LIBTORRENT_ALERTS_H diff --git a/bindings/python/src/peer_info.cpp b/bindings/python/src/peer_info.cpp index 2408b254394..0792395288d 100644 --- a/bindings/python/src/peer_info.cpp +++ b/bindings/python/src/peer_info.cpp @@ -94,7 +94,7 @@ void bind_peer_info() .def_readonly("downloading_progress", &peer_info::downloading_progress) .def_readonly("downloading_total", &peer_info::downloading_total) .add_property("client", get_peer_info_client) - .def_readonly("connection_type", &peer_info::connection_type) + .add_property("connection_type", make_getter(&peer_info::connection_type, by_value())) .def_readonly("pending_disk_bytes", &peer_info::pending_disk_bytes) .def_readonly("send_quota", &peer_info::send_quota) .def_readonly("receive_quota", &peer_info::receive_quota) diff --git a/bindings/python/src/session_settings.cpp b/bindings/python/src/session_settings.cpp index 3e479283f96..ef958dda58c 100644 --- a/bindings/python/src/session_settings.cpp +++ b/bindings/python/src/session_settings.cpp @@ -63,6 +63,7 @@ void bind_session_settings() 
.value("disable_os_cache_for_aligned_files", settings_pack::disable_os_cache_for_aligned_files) #endif .value("disable_os_cache", settings_pack::disable_os_cache) + .value("write_through", settings_pack::write_through) ; enum_("bandwidth_mixed_algo_t") diff --git a/examples/Jamfile b/examples/Jamfile index 2f6ec04a943..6413377ebc6 100644 --- a/examples/Jamfile +++ b/examples/Jamfile @@ -14,11 +14,12 @@ project client_test multi /torrent//torrent darwin:-Wno-unused-command-line-argument # disable warning C4275: non DLL-interface classkey 'identifier' used as base for DLL-interface classkey 'identifier' - msvc:/wd4275 + msvc:/wd4275 # C4268: 'identifier' : 'const' static/global data initialized # with compiler generated default constructor fills the object with zeros - msvc:/wd4268 - msvc:/wd4373 + msvc:/wd4268 + msvc:/wd4373 + clang:-Wno-implicit-int-float-conversion @warnings : default-build static diff --git a/examples/client_test.cpp b/examples/client_test.cpp index a979b1d7fae..a6fbe53d936 100644 --- a/examples/client_test.cpp +++ b/examples/client_test.cpp @@ -610,6 +610,7 @@ void assign_setting(lt::settings_pack& settings, std::string const& key, char co {"anti_leech"_sv, settings_pack::anti_leech}, {"enable_os_cache"_sv, settings_pack::enable_os_cache}, {"disable_os_cache"_sv, settings_pack::disable_os_cache}, + {"write_through"_sv, settings_pack::write_through}, {"prefer_tcp"_sv, settings_pack::prefer_tcp}, {"peer_proportional"_sv, settings_pack::peer_proportional}, {"pe_forced"_sv, settings_pack::pe_forced}, diff --git a/include/libtorrent/alert_types.hpp b/include/libtorrent/alert_types.hpp index 2b039052a81..bf38b32c821 100644 --- a/include/libtorrent/alert_types.hpp +++ b/include/libtorrent/alert_types.hpp @@ -73,7 +73,7 @@ namespace libtorrent { constexpr int user_alert_id = 10000; // this constant represents "max_alert_index" + 1 - constexpr int num_alert_types = 98; + constexpr int num_alert_types = 99; // internal constexpr int abi_alert_count = 128; @@ -2933,6 +2933,26 @@ TORRENT_VERSION_NAMESPACE_3 TORRENT_VERSION_NAMESPACE_3_END + // this alert may be posted when the initial checking of resume data and files + // on disk (just existence, not piece hashes) completes. If a file belonging + // to the torrent is found on disk, but is larger than the file in the + // torrent, that's when this alert is posted. + // the client may want to call truncate_files() in that case, or perhaps + // interpret it as a sign that some other file is in the way, that shouldn't + // be overwritten. 
+ struct TORRENT_EXPORT oversized_file_alert final : torrent_alert + { + // internal + explicit oversized_file_alert(aux::stack_allocator& alloc, torrent_handle h); + TORRENT_DEFINE_ALERT(oversized_file_alert, 98) + + static constexpr alert_category_t static_category = alert_category::storage; + std::string message() const override; + + // hidden + file_index_t reserved; + }; + // internal TORRENT_EXTRA_EXPORT char const* performance_warning_str(performance_alert::performance_warning_t i); diff --git a/include/libtorrent/assert.hpp b/include/libtorrent/assert.hpp index 9038549eeb1..a9541aa1751 100644 --- a/include/libtorrent/assert.hpp +++ b/include/libtorrent/assert.hpp @@ -70,12 +70,12 @@ extern TORRENT_EXPORT char const* libtorrent_assert_log; #if TORRENT_USE_IOSTREAM #define TORRENT_ASSERT_VAL(x, y) \ - do { if (x) {} else { std::stringstream __s__; __s__ << #y ": " << y; \ - libtorrent::assert_fail(#x, __LINE__, __FILE__, __func__, __s__.str().c_str(), 0); } } TORRENT_WHILE_0 + do { if (x) {} else { std::stringstream _s; _s << #y ": " << y; \ + libtorrent::assert_fail(#x, __LINE__, __FILE__, __func__, _s.str().c_str(), 0); } } TORRENT_WHILE_0 #define TORRENT_ASSERT_FAIL_VAL(y) \ - do { std::stringstream __s__; __s__ << #y ": " << y; \ - libtorrent::assert_fail("", __LINE__, __FILE__, __func__, __s__.str().c_str(), 0); } TORRENT_WHILE_0 + do { std::stringstream _s; _s << #y ": " << y; \ + libtorrent::assert_fail("", __LINE__, __FILE__, __func__, _s.str().c_str(), 0); } TORRENT_WHILE_0 #else #define TORRENT_ASSERT_VAL(x, y) TORRENT_ASSERT(x) diff --git a/include/libtorrent/aux_/disk_buffer_pool.hpp b/include/libtorrent/aux_/disk_buffer_pool.hpp index e37b62a7822..c6fcd1ffd47 100644 --- a/include/libtorrent/aux_/disk_buffer_pool.hpp +++ b/include/libtorrent/aux_/disk_buffer_pool.hpp @@ -24,7 +24,7 @@ see LICENSE file. 
#include "libtorrent/io_context.hpp" #include "libtorrent/span.hpp" -#include "libtorrent/aux_/storage_utils.hpp" // for iovec_t +#include "libtorrent/disk_buffer_holder.hpp" // for buffer_allocator_interface namespace libtorrent { @@ -33,7 +33,8 @@ namespace libtorrent { namespace aux { - struct TORRENT_EXTRA_EXPORT disk_buffer_pool + struct TORRENT_EXTRA_EXPORT disk_buffer_pool final + : buffer_allocator_interface { explicit disk_buffer_pool(io_context& ios); ~disk_buffer_pool(); @@ -43,6 +44,7 @@ namespace aux { char* allocate_buffer(char const* category); char* allocate_buffer(bool& exceeded, std::shared_ptr o , char const* category); + void free_disk_buffer(char* b) override { free_buffer(b); } void free_buffer(char* buf); void free_multiple_buffers(span bufvec); diff --git a/include/libtorrent/aux_/mmap.hpp b/include/libtorrent/aux_/mmap.hpp index dfe60c0a0a2..52079dc939e 100644 --- a/include/libtorrent/aux_/mmap.hpp +++ b/include/libtorrent/aux_/mmap.hpp @@ -128,6 +128,10 @@ namespace aux { // anytime soon void dont_need(span range); + // hint the kernel that the given (dirty) range of pages should be + // flushed to disk + void page_out(span range); + std::int64_t m_size; #if TORRENT_HAVE_MAP_VIEW_OF_FILE file_mapping_handle m_file; @@ -162,6 +166,13 @@ namespace aux { m_mapping->dont_need(range); } + void page_out(span range) + { + TORRENT_ASSERT(m_mapping); + m_mapping->page_out(range); + } + + private: explicit file_view(std::shared_ptr m) : m_mapping(std::move(m)) {} std::shared_ptr m_mapping; diff --git a/include/libtorrent/aux_/mmap_storage.hpp b/include/libtorrent/aux_/mmap_storage.hpp index 55179ca2fc2..709278d0530 100644 --- a/include/libtorrent/aux_/mmap_storage.hpp +++ b/include/libtorrent/aux_/mmap_storage.hpp @@ -69,7 +69,7 @@ namespace libtorrent::aux { , storage_error&); void release_files(storage_error&); void delete_files(remove_flags_t options, storage_error&); - void initialize(settings_interface const&, storage_error&); + status_t initialize(settings_interface const&, storage_error&); std::pair move_storage(std::string save_path , move_flags_t, storage_error&); bool verify_resume_data(add_torrent_params const& rd @@ -78,9 +78,13 @@ namespace libtorrent::aux { bool tick(); int readv(settings_interface const&, span bufs - , piece_index_t piece, int offset, aux::open_mode_t mode, storage_error&); + , piece_index_t piece, int offset, aux::open_mode_t mode + , disk_job_flags_t flags + , storage_error&); int writev(settings_interface const&, span bufs - , piece_index_t piece, int offset, aux::open_mode_t mode, storage_error&); + , piece_index_t piece, int offset, aux::open_mode_t mode + , disk_job_flags_t flags + , storage_error&); int hashv(settings_interface const&, hasher& ph, std::ptrdiff_t len , piece_index_t piece, int offset, aux::open_mode_t mode , disk_job_flags_t flags, storage_error&); diff --git a/include/libtorrent/aux_/path.hpp b/include/libtorrent/aux_/path.hpp index 2533141eb07..26b9cd72876 100644 --- a/include/libtorrent/aux_/path.hpp +++ b/include/libtorrent/aux_/path.hpp @@ -94,8 +94,6 @@ namespace libtorrent { TORRENT_EXTRA_EXPORT bool exists(std::string const& f, error_code& ec); TORRENT_EXTRA_EXPORT bool is_directory(std::string const& f , error_code& ec); - TORRENT_EXTRA_EXPORT void recursive_copy(std::string const& old_path - , std::string const& new_path, error_code& ec); TORRENT_EXTRA_EXPORT void copy_file(std::string const& f , std::string const& newf, error_code& ec); TORRENT_EXTRA_EXPORT void move_file(std::string const& f diff --git 
a/include/libtorrent/aux_/posix_storage.hpp b/include/libtorrent/aux_/posix_storage.hpp index b6432cda607..78b4e1ed536 100644 --- a/include/libtorrent/aux_/posix_storage.hpp +++ b/include/libtorrent/aux_/posix_storage.hpp @@ -62,7 +62,7 @@ namespace aux { void rename_file(file_index_t const index, std::string const& new_filename, storage_error& ec); - void initialize(settings_interface const&, storage_error& ec); + status_t initialize(settings_interface const&, storage_error& ec); private: diff --git a/include/libtorrent/aux_/storage_free_list.hpp b/include/libtorrent/aux_/storage_free_list.hpp index ca8e86ad074..d4c766ba113 100644 --- a/include/libtorrent/aux_/storage_free_list.hpp +++ b/include/libtorrent/aux_/storage_free_list.hpp @@ -29,6 +29,8 @@ namespace aux { void add(storage_index_t const i) { m_free_slots.push_back(i); } + std::size_t size() const { return m_free_slots.size(); } + private: storage_index_t pop() diff --git a/include/libtorrent/aux_/storage_utils.hpp b/include/libtorrent/aux_/storage_utils.hpp index 837df7c439c..23627bae1af 100644 --- a/include/libtorrent/aux_/storage_utils.hpp +++ b/include/libtorrent/aux_/storage_utils.hpp @@ -92,6 +92,7 @@ namespace aux { , aux::vector const& file_priority , std::function create_file , std::function create_link + , std::function oversized_file , storage_error& ec); TORRENT_EXTRA_EXPORT void create_symlink( diff --git a/include/libtorrent/aux_/store_buffer.hpp b/include/libtorrent/aux_/store_buffer.hpp index fe120115920..40b67f29b9a 100644 --- a/include/libtorrent/aux_/store_buffer.hpp +++ b/include/libtorrent/aux_/store_buffer.hpp @@ -109,6 +109,11 @@ struct store_buffer m_store_buffer.erase(it); } + std::size_t size() const + { + return m_store_buffer.size(); + } + private: mutable std::mutex m_mutex; diff --git a/include/libtorrent/config.hpp b/include/libtorrent/config.hpp index eee2abff242..6e3dfc9e710 100644 --- a/include/libtorrent/config.hpp +++ b/include/libtorrent/config.hpp @@ -76,11 +76,15 @@ see LICENSE file. #if defined __APPLE__ -#define TORRENT_NATIVE_UTF8 1 - #include #include +#if defined __MACH__ && MAC_OS_X_VERSION_MIN_REQUIRED >= 1050 +#define TORRENT_HAS_COPYFILE 1 +#endif + +#define TORRENT_NATIVE_UTF8 1 + #if MAC_OS_X_VERSION_MIN_REQUIRED >= 1070 // on OSX, use the built-in common crypto for built-in # if !defined TORRENT_USE_LIBCRYPTO && !defined TORRENT_USE_LIBGCRYPT @@ -131,6 +135,10 @@ see LICENSE file. #define TORRENT_HAVE_MMAP 1 #endif +#if defined __GLIBC__ && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 27)) +#define TORRENT_HAS_COPY_FILE_RANGE 1 +#endif + #define TORRENT_HAS_PTHREAD_SET_NAME 1 #define TORRENT_HAS_SYMLINK 1 #define TORRENT_USE_MADVISE 1 @@ -503,6 +511,14 @@ see LICENSE file. #define TORRENT_HAS_PTHREAD_SET_NAME 0 #endif +#ifndef TORRENT_HAS_COPY_FILE_RANGE +#define TORRENT_HAS_COPY_FILE_RANGE 0 +#endif + +#ifndef TORRENT_HAS_COPYFILE +#define TORRENT_HAS_COPYFILE 0 +#endif + // debug builds have asserts enabled by default, release // builds have asserts if they are explicitly enabled by // the release_asserts macro. diff --git a/include/libtorrent/disk_interface.hpp b/include/libtorrent/disk_interface.hpp index cd28d1687e1..a85a48bdb93 100644 --- a/include/libtorrent/disk_interface.hpp +++ b/include/libtorrent/disk_interface.hpp @@ -150,6 +150,10 @@ namespace file_open_mode { // hash does not need to be computed. 
static inline constexpr disk_job_flags_t v1_hash = 5_bit; + // this flag instructs a hash job that we just completed this piece, and + // it should be flushed to disk + static constexpr disk_job_flags_t flush_piece = 7_bit; + // this is called when a new torrent is added. The shared_ptr can be // used to hold the internal torrent object alive as long as there are // outstanding disk operations on the storage. diff --git a/include/libtorrent/fwd.hpp b/include/libtorrent/fwd.hpp index 9f0ef94d136..e1fd6289c07 100644 --- a/include/libtorrent/fwd.hpp +++ b/include/libtorrent/fwd.hpp @@ -121,6 +121,7 @@ struct alerts_dropped_alert; struct socks5_alert; struct file_prio_alert; TORRENT_VERSION_NAMESPACE_3_END +struct oversized_file_alert; // include/libtorrent/announce_entry.hpp TORRENT_VERSION_NAMESPACE_2 diff --git a/include/libtorrent/libtorrent.hpp b/include/libtorrent/libtorrent.hpp index 6c9123a2d9c..8d8ea1a962a 100644 --- a/include/libtorrent/libtorrent.hpp +++ b/include/libtorrent/libtorrent.hpp @@ -104,6 +104,7 @@ #include "libtorrent/torrent_info.hpp" #include "libtorrent/torrent_status.hpp" #include "libtorrent/tracker_event.hpp" +#include "libtorrent/truncate.hpp" #include "libtorrent/units.hpp" #include "libtorrent/upnp.hpp" #include "libtorrent/version.hpp" diff --git a/include/libtorrent/settings_pack.hpp b/include/libtorrent/settings_pack.hpp index 7b84a676aa9..58026b21ffa 100644 --- a/include/libtorrent/settings_pack.hpp +++ b/include/libtorrent/settings_pack.hpp @@ -641,19 +641,16 @@ namespace aux { // will go straight to download mode. no_recheck_incomplete_resume, - // ``anonymous_mode``: When set to true, the client - // tries to hide its identity to a certain degree. The user-agent will be - // reset to an empty string (except for private torrents). Trackers - // will only be used if they are using a proxy server. - // The listen sockets are closed, and incoming - // connections will only be accepted through a SOCKS5 or I2P proxy (if - // a peer proxy is set up and is run on the same machine as the - // tracker proxy). Since no incoming connections are accepted, - // NAT-PMP, UPnP, DHT and local peer discovery are all turned off when - // this setting is enabled. - // - // If you're using I2P, it might make sense to enable anonymous mode - // as well. + // ``anonymous_mode``: When set to true, the client tries to hide + // its identity to a certain degree. + // + // * A generic user-agent will be + // used for trackers (except for private torrents). + // * Your local IPv4 and IPv6 address won't be sent as query string + // parameters to private trackers. + // * If announce_ip is configured, it will not be sent to trackers + // * The client version will not be sent to peers in the extension + // handshake. anonymous_mode, // specifies whether downloads from web seeds is reported to the @@ -1253,6 +1250,8 @@ namespace aux { // potentially evict all other processes' cache by simply handling // high throughput and large files. If libtorrent's read cache is // disabled, enabling this may reduce performance. + // write_through + // flush pieces to disk as they complete validation. // // One reason to disable caching is that it may help the operating // system from growing its file cache indefinitely. 
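As an illustration of how the new write_through mode documented above would be selected (this sketch is not part of the patch; the helper name and the session variable are assumed for the example, while the settings_pack API itself is the existing one):

    #include <libtorrent/session.hpp>
    #include <libtorrent/settings_pack.hpp>

    namespace lt = libtorrent;

    // assumed helper for the example: pick the new write_through
    // disk_io_write_mode so pieces are flushed to disk as they complete
    // validation, instead of lingering in the page cache
    void enable_write_through(lt::session& ses)
    {
        lt::settings_pack p;
        p.set_int(lt::settings_pack::disk_io_write_mode
            , lt::settings_pack::write_through);
        ses.apply_settings(p);
    }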
@@ -2069,7 +2068,9 @@ namespace aux { #else deprecated_disable_os_cache_for_aligned_files = 1, #endif - disable_os_cache = 2 + disable_os_cache = 2, + + write_through = 3, }; enum bandwidth_mixed_algo_t : std::uint8_t diff --git a/include/libtorrent/storage_defs.hpp b/include/libtorrent/storage_defs.hpp index 98176ad2479..05ac31d458f 100644 --- a/include/libtorrent/storage_defs.hpp +++ b/include/libtorrent/storage_defs.hpp @@ -43,9 +43,32 @@ namespace libtorrent { no_error, fatal_disk_error, need_full_check, - file_exist + file_exist, + + // hidden + mask = 0xf, + + // this is not an enum value, but a flag that can be set in the return + // from async_check_files, in case an existing file was found larger than + // specified in the torrent. i.e. it has garbage at the end + // the status_t field is used for this to preserve ABI. + oversized_file = 0x10, }; + // internal + inline status_t operator|(status_t lhs, status_t rhs) + { + return status_t(static_cast(lhs) | static_cast(rhs)); + } + inline status_t operator&(status_t lhs, status_t rhs) + { + return status_t(static_cast(lhs) & static_cast(rhs)); + } + inline status_t operator~(status_t lhs) + { + return status_t(~static_cast(lhs)); + } + // flags for async_move_storage enum class move_flags_t : std::uint8_t { diff --git a/include/libtorrent/torrent_handle.hpp b/include/libtorrent/torrent_handle.hpp index d4b38ea3313..46726f62c98 100644 --- a/include/libtorrent/torrent_handle.hpp +++ b/include/libtorrent/torrent_handle.hpp @@ -1058,6 +1058,10 @@ namespace aux { // When combining file- and piece priorities, the resume file will record // both. When loading the resume data, the file priorities will be applied // first, then the piece priorities. + // + // Moving data from a file into the part file is currently not + // supported. If a file has its priority set to 0 *after* it has already + // been created, it will not be moved into the partfile. void file_priority(file_index_t index, download_priority_t priority) const; download_priority_t file_priority(file_index_t index) const; void prioritize_files(std::vector const& files) const; @@ -1236,6 +1240,11 @@ namespace aux { // torrent but are stored in the torrent's directory may be moved as // well. This goes for files that have been renamed to absolute paths // that still end up inside the save path. + // + // When copying files, sparse regions are not likely to be preserved. + // This makes it proportionally more expensive to move a large torrent + // when only few pieces have been downloaded, since the files are then + // allocated with zeros in the destination directory. void move_storage(std::string const& save_path , move_flags_t flags = move_flags_t::always_replace_files ) const; diff --git a/include/libtorrent/truncate.hpp b/include/libtorrent/truncate.hpp new file mode 100644 index 00000000000..0786c81e475 --- /dev/null +++ b/include/libtorrent/truncate.hpp @@ -0,0 +1,48 @@ +/* + +Copyright (c) 2022, Arvid Norberg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. 
+ * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +*/ + +#ifndef TORRENT_TRUNCATE_HPP_INCLUDED +#define TORRENT_TRUNCATE_HPP_INCLUDED + +#include "libtorrent/fwd.hpp" +#include "libtorrent/error_code.hpp" +#include + +namespace libtorrent { + +// Truncates files larger than specified in the file_storage, saved under +// the specified save_path. +TORRENT_EXPORT void truncate_files(file_storage const& fs, std::string const& save_path, storage_error& ec); + +} + +#endif diff --git a/simulation/Jamfile b/simulation/Jamfile index 6f212d17e64..41f8b24687d 100644 --- a/simulation/Jamfile +++ b/simulation/Jamfile @@ -18,9 +18,9 @@ project utils.cpp disk_io.cpp transfer_sim.cpp - msvc:/wd4275 - msvc:/wd4005 - msvc:/wd4268 + msvc:/wd4275 + msvc:/wd4005 + msvc:/wd4268 : default-build multi full diff --git a/simulation/create_torrent.cpp b/simulation/create_torrent.cpp index 5758deb79a1..df65694ca1e 100644 --- a/simulation/create_torrent.cpp +++ b/simulation/create_torrent.cpp @@ -17,7 +17,7 @@ see LICENSE file. std::string save_path(int idx) { - int const swarm_id = test_counter(); + int const swarm_id = unit_test::test_counter(); char path[200]; std::snprintf(path, sizeof(path), "swarm-%04d-peer-%02d" , swarm_id, idx); @@ -30,7 +30,7 @@ lt::add_torrent_params create_torrent(int const idx, bool const seed // TODO: if we want non-seeding torrents, that could be a bit cheaper to // create lt::add_torrent_params params; - int swarm_id = test_counter(); + int swarm_id = unit_test::test_counter(); char name[200]; std::snprintf(name, sizeof(name), "temp-%02d", swarm_id); std::string path = save_path(idx); diff --git a/simulation/setup_swarm.cpp b/simulation/setup_swarm.cpp index f19c42df2a4..e72906ab15f 100644 --- a/simulation/setup_swarm.cpp +++ b/simulation/setup_swarm.cpp @@ -189,7 +189,7 @@ void setup_swarm(int num_nodes lt::aux::deadline_timer timer(ios); lt::error_code ec; - int const swarm_id = test_counter(); + int const swarm_id = unit_test::test_counter(); std::string path = save_path(swarm_id, 0); std::shared_ptr ti; diff --git a/simulation/test_error_handling.cpp b/simulation/test_error_handling.cpp index 7b1a4d89417..26bdaba5b9e 100644 --- a/simulation/test_error_handling.cpp +++ b/simulation/test_error_handling.cpp @@ -174,7 +174,7 @@ TORRENT_TEST(error_handling) // this will clear the history of all output we've printed so far. 
// if we encounter an error from now on, we'll only print the relevant // iteration - reset_output(); + unit_test::reset_output(); // re-seed the random engine each iteration, to make the runs // deterministic diff --git a/simulation/test_swarm.cpp b/simulation/test_swarm.cpp index 8d93e7749d2..23c7dcf64a5 100644 --- a/simulation/test_swarm.cpp +++ b/simulation/test_swarm.cpp @@ -863,7 +863,7 @@ TORRENT_TEST(pex) lt::aux::deadline_timer timer(ios); lt::error_code ec; - int const swarm_id = test_counter(); + int const swarm_id = unit_test::test_counter(); std::string path = save_path(swarm_id, 0); lt::create_directory(path, ec); diff --git a/simulation/test_tracker.cpp b/simulation/test_tracker.cpp index a64b6ccc5df..136dd5cea18 100644 --- a/simulation/test_tracker.cpp +++ b/simulation/test_tracker.cpp @@ -1300,8 +1300,8 @@ TORRENT_TEST(tracker_user_agent_privacy_mode_public_torrent) { got_announce = true; - // in anonymous mode we should not send a user agent - TEST_CHECK(headers["user-agent"] == ""); + // in anonymous mode we should send a generic user agent + TEST_CHECK(headers["user-agent"] == "curl/7.81.0"); return sim::send_response(200, "OK", 11) + "d5:peers0:e"; } , [](torrent_handle h) {} diff --git a/simulation/test_transfer_matrix.cpp b/simulation/test_transfer_matrix.cpp index b4adebba547..8f393a3d229 100644 --- a/simulation/test_transfer_matrix.cpp +++ b/simulation/test_transfer_matrix.cpp @@ -84,14 +84,14 @@ TORRENT_TEST(transfer_matrix) // this will clear the history of all output we've printed so far. // if we encounter an error from now on, we'll only print the relevant // iteration - reset_output(); + ::unit_test::reset_output(); // re-seed the random engine each iteration, to make the runs // deterministic lt::aux::random_engine().seed(0x23563a7f); run_matrix_test(piece_size | bt_version | magnet | multi_file, files, corruption); - if (_g_test_failures > 0) return; + if (::unit_test::g_test_failures > 0) return; } } diff --git a/src/alert.cpp b/src/alert.cpp index 9e82a3fffc7..f5054c007e0 100644 --- a/src/alert.cpp +++ b/src/alert.cpp @@ -2924,7 +2924,7 @@ namespace { "picker_log", "session_error", "dht_live_nodes", "session_stats_header", "dht_sample_infohashes", "block_uploaded", "alerts_dropped", "socks5", - "file_prio" + "file_prio", "oversized_file" }}; TORRENT_ASSERT(alert_type >= 0); @@ -2983,4 +2983,17 @@ namespace { #endif } + oversized_file_alert::oversized_file_alert(aux::stack_allocator& a, torrent_handle h) + : torrent_alert(a, std::move(h)) + {} + + std::string oversized_file_alert::message() const + { +#ifdef TORRENT_DISABLE_ALERT_MSG + return {}; +#else + return torrent_alert::message() + " has an oversized file"; +#endif + } + } // namespace libtorrent diff --git a/src/copy_file.cpp b/src/copy_file.cpp new file mode 100644 index 00000000000..73bdeb2502e --- /dev/null +++ b/src/copy_file.cpp @@ -0,0 +1,427 @@ +/* + +Copyright (c) 2022, Arvid Norberg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. 
+ * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include "libtorrent/config.hpp" + +#include "libtorrent/error_code.hpp" +#include "libtorrent/aux_/path.hpp" + +#ifdef TORRENT_WINDOWS +// windows part +#include "libtorrent/aux_/windows.hpp" +#else + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifndef _XOPEN_SOURCE +#define _XOPEN_SOURCE 600 +#endif + +#include +#include + +#if TORRENT_HAS_COPYFILE +#include +#endif + +#endif + +namespace libtorrent { + +#ifdef TORRENT_WINDOWS +namespace { + +// returns true if the given file has any regions that are +// sparse, i.e. not allocated. This is similar to calling lseek(SEEK_DATA) and +// lseek(SEEK_HOLE) +std::pair next_allocated_region(HANDLE file + , std::int64_t const offset + , std::int64_t file_size + , error_code& ec) +{ +#ifndef FSCTL_QUERY_ALLOCATED_RANGES + typedef struct _FILE_ALLOCATED_RANGE_BUFFER { + LARGE_INTEGER FileOffset; + LARGE_INTEGER Length; + } FILE_ALLOCATED_RANGE_BUFFER; +#define FSCTL_QUERY_ALLOCATED_RANGES ((0x9 << 16) | (1 << 14) | (51 << 2) | 3) +#endif + FILE_ALLOCATED_RANGE_BUFFER in; + in.FileOffset.QuadPart = offset; + in.Length.QuadPart = file_size - offset; + + FILE_ALLOCATED_RANGE_BUFFER out; + + DWORD returned_bytes = 0; + BOOL const ret = DeviceIoControl(file, FSCTL_QUERY_ALLOCATED_RANGES + , static_cast(&in), sizeof(in) + , &out, sizeof(out), &returned_bytes, nullptr); + + if (ret == FALSE) + { + int const error = ::GetLastError(); + // we expect this error, since we just ask for one allocated range at a + // time. 
+ if (error != ERROR_MORE_DATA) + { + ec.assign(error, system_category()); + return {0, 0}; + } + } + + if (returned_bytes != sizeof(out)) { + return {file_size, file_size}; + } + + return {out.FileOffset.QuadPart, out.FileOffset.QuadPart + out.Length.QuadPart}; +} + +struct file_handle +{ + file_handle(HANDLE h) : m_h(h) {} + + ~file_handle() + { + if (m_h != INVALID_HANDLE_VALUE) ::CloseHandle(m_h); + } + + file_handle(file_handle const&) = delete; + file_handle(file_handle&& rhs) + : m_h(rhs.m_h) + { + rhs.m_h = INVALID_HANDLE_VALUE; + } + HANDLE handle() const { return m_h; } +private: + HANDLE m_h; +}; + +void copy_range(HANDLE const in_handle, HANDLE const out_handle + , std::int64_t in_offset, std::int64_t len, error_code& ec) +{ + char buffer[16384]; + while (len > 0) + { + OVERLAPPED in_ol{}; + in_ol.Offset = in_offset & 0xffffffff; + in_ol.OffsetHigh = in_offset >> 32; + DWORD num_read = 0; + if (ReadFile(in_handle, buffer, DWORD(std::min(len, std::int64_t(sizeof(buffer)))) + , &num_read, &in_ol) == 0) + { + int const error = ::GetLastError(); + if (error == ERROR_HANDLE_EOF) return; + + ec.assign(error, system_category()); + return; + } + + len -= num_read; + int buf_offset = 0; + while (num_read > 0) + { + OVERLAPPED out_ol{}; + out_ol.Offset = in_offset & 0xffffffff; + out_ol.OffsetHigh = in_offset >> 32; + DWORD num_written = 0; + if (WriteFile(out_handle, buffer + buf_offset, DWORD(num_read - buf_offset) + , &num_written, &out_ol) == 0) + { + ec.assign(::GetLastError(), system_category()); + return; + } + buf_offset += num_written; + num_read -= num_written; + in_offset += num_written; + } + } + return; +} + +} + +void copy_file(std::string const& inf, std::string const& newf, error_code& ec) +{ + ec.clear(); + native_path_string f1 = convert_to_native_path_string(inf); + native_path_string f2 = convert_to_native_path_string(newf); + + WIN32_FILE_ATTRIBUTE_DATA in_stat; + if (!GetFileAttributesExW(f1.c_str(), GetFileExInfoStandard, &in_stat)) + { + ec.assign(GetLastError(), system_category()); + return; + } + + if ((in_stat.dwFileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) == 0) + { + // if the input file is not sparse, use the system copy function + if (CopyFileW(f1.c_str(), f2.c_str(), false) == 0) + ec.assign(GetLastError(), system_category()); + return; + } + + std::int64_t const in_size = (std::int64_t(in_stat.nFileSizeHigh) << 32) + | in_stat.nFileSizeLow; + +#ifdef TORRENT_WINRT + file_handle in_handle = ::CreateFile2(f1.c_str() + , GENERIC_READ + , FILE_SHARE_READ + , OPEN_EXISTING + , nullptr); +#else + file_handle in_handle = ::CreateFileW(f1.c_str() + , GENERIC_READ + , FILE_SHARE_READ + , nullptr + , OPEN_EXISTING + , FILE_FLAG_SEQUENTIAL_SCAN + , nullptr); +#endif + if (in_handle.handle() == INVALID_HANDLE_VALUE) + { + ec.assign(GetLastError(), system_category()); + return; + } + +#ifdef TORRENT_WINRT + file_handle out_handle = ::CreateFile2(f2.c_str() + , GENERIC_WRITE + , FILE_SHARE_WRITE + , OPEN_ALWAYS + , nullptr); +#else + file_handle out_handle = ::CreateFileW(f2.c_str() + , GENERIC_WRITE + , FILE_SHARE_WRITE + , nullptr + , OPEN_ALWAYS + , FILE_FLAG_WRITE_THROUGH + , nullptr); +#endif + if (out_handle.handle() == INVALID_HANDLE_VALUE) + { + ec.assign(GetLastError(), system_category()); + return; + } + + DWORD temp; + if (::DeviceIoControl(out_handle.handle(), FSCTL_SET_SPARSE + , nullptr, 0, nullptr, 0, &temp, nullptr) == 0) + { + ec.assign(GetLastError(), system_category()); + return; + } + + std::pair data(0, 0); + for (;;) + { + data = 
next_allocated_region(in_handle.handle(), data.second, in_size, ec); + if (ec) return; + + copy_range(in_handle.handle(), out_handle.handle(), data.first, data.second - data.first, ec); + if (ec) return; + // There's a possible time-of-check-time-of-use race here. + // The source file may have grown during the copy operation, in which + // case data.second may exceed the initial size + if (data.second >= in_size) return; + } +} + +#else +// Generic/linux implementation + +namespace { + +struct file_descriptor +{ + file_descriptor(int fd) : m_fd(fd) {} + + ~file_descriptor() + { + if (m_fd >= 0) ::close(m_fd); + } + + file_descriptor(file_descriptor const&) = delete; + file_descriptor(file_descriptor&& rhs) = delete; + int fd() const { return m_fd; } +private: + int m_fd; +}; + +ssize_t copy_range(int const fd_in, int const fd_out, off_t in_offset + , std::int64_t len, error_code& ec) +{ +#if TORRENT_HAS_COPY_FILE_RANGE + off_t out_offset = in_offset; + ssize_t ret = 0; + do + { + ret = ::copy_file_range(fd_in, &in_offset + , fd_out, &out_offset, std::size_t(len), 0); + if (ret < 0) + { + ec.assign(errno, system_category()); + return -1; + } + + len -= ret; + } while (len > 0 && ret > 0); + return ret; +#else + char buffer[16384]; + ssize_t total_copied = 0; + while (len > 0) + { + ssize_t num_read = ::pread(fd_in, buffer + , std::size_t(std::min(len, std::int64_t(sizeof(buffer)))), in_offset); + if (num_read == 0) return total_copied; + if (num_read < 0) + { + ec.assign(errno, system_category()); + return -1; + } + len -= num_read; + int buf_offset = 0; + while (num_read > 0) + { + auto const ret = ::pwrite(fd_out, buffer + buf_offset + , std::size_t(num_read - buf_offset), in_offset); + if (ret <= 0) + { + ec.assign(errno, system_category()); + return -1; + } + buf_offset += ret; + num_read -= ret; + in_offset += ret; + total_copied += ret; + } + } + return total_copied; +#endif +} + +} // anonymous namespace + +void copy_file(std::string const& inf, std::string const& newf, error_code& ec) +{ + ec.clear(); + native_path_string f1 = convert_to_native_path_string(inf); + native_path_string f2 = convert_to_native_path_string(newf); + + file_descriptor const infd = ::open(f1.c_str(), O_RDONLY); + if (infd.fd() < 0) + { + ec.assign(errno, system_category()); + return; + } + + struct stat in_stat; + if (::fstat(infd.fd(), &in_stat) != 0) + { + ec.assign(errno, system_category()); + return; + } + + bool const input_is_sparse = in_stat.st_size > off_t(in_stat.st_blocks) * 512; + + // if the source file is not sparse we'll end up copying every byte anyway, + // there's no point in passing O_TRUNC. However, in order to preserve sparse + // regions, we *do* need to truncate the output file. + file_descriptor const outfd = ::open(f2.c_str() + , input_is_sparse ? 
(O_RDWR | O_CREAT | O_TRUNC) : (O_RDWR | O_CREAT), in_stat.st_mode); + if (outfd.fd() < 0) + { + ec.assign(errno, system_category()); + return; + } + +#if TORRENT_HAS_COPYFILE + if (!input_is_sparse) + { + // if the file isn't sparse, use the system copy function (which + // expands sparse regions) + // this only works on 10.5 + copyfile_state_t state = copyfile_state_alloc(); + if (fcopyfile(infd.fd(), outfd.fd(), state, COPYFILE_ALL) < 0) + ec.assign(errno, system_category()); + copyfile_state_free(state); + return; + } +#endif + + if (::ftruncate(outfd.fd(), in_stat.st_size) < 0) + { + ec.assign(errno, system_category()); + return; + } + +#ifdef SEEK_HOLE + if (input_is_sparse) + { + ssize_t ret = 0; + off_t data_start = 0; + off_t data_end = 0; + for (;;) + { + data_start = ::lseek(infd.fd(), data_end, SEEK_DATA); + if (data_start == off_t(-1)) + { + ec.assign(errno, system_category()); + return; + } + + data_end = ::lseek(infd.fd(), data_start, SEEK_HOLE); + if (data_end == off_t(-1)) + { + ec.assign(errno, system_category()); + return; + } + + ret = copy_range(infd.fd(), outfd.fd(), data_start, data_end - data_start, ec); + if (ret <= 0) return; + if (data_end == in_stat.st_size) return; + } + } +#endif + + copy_range(infd.fd(), outfd.fd(), 0, in_stat.st_size, ec); +} + +#endif // TORRENT_WINDOWS + +} + diff --git a/src/create_torrent.cpp b/src/create_torrent.cpp index 22b79798bd1..4fb3dc756c3 100644 --- a/src/create_torrent.cpp +++ b/src/create_torrent.cpp @@ -140,6 +140,7 @@ namespace { auto const file_piece_offset = piece - file_first_piece; auto const file_size = st->ct.files().file_size(current_file); + TORRENT_ASSERT(file_size > 0); auto const file_blocks = st->ct.files().file_num_blocks(current_file); auto const piece_blocks = st->ct.files().blocks_in_piece2(piece); int const num_leafs = merkle_num_leafs(file_blocks); @@ -463,6 +464,7 @@ namespace { { // don't include merkle hash trees for pad files if (m_files.pad_file_at(i)) continue; + if (m_files.file_size(i) == 0) continue; auto const file_size = m_files.file_size(i); if (file_size <= m_files.piece_length()) @@ -872,6 +874,7 @@ namespace { TORRENT_ASSERT_PRECOND(piece < piece_index_t::diff_type(m_files.file_num_pieces(file))); TORRENT_ASSERT_PRECOND(!m_files.pad_file_at(file)); TORRENT_ASSERT_PRECOND(!h.is_all_zeros()); + TORRENT_ASSERT_PRECOND(m_files.file_num_pieces(file) > 0); if (m_v1_only) aux::throw_ex(errors::invalid_hash_entry); diff --git a/src/http_tracker_connection.cpp b/src/http_tracker_connection.cpp index a1fad19ef6f..4bec5caed67 100644 --- a/src/http_tracker_connection.cpp +++ b/src/http_tracker_connection.cpp @@ -216,8 +216,11 @@ namespace libtorrent::aux { // in anonymous mode we omit the user agent to mitigate fingerprinting of // the client. Private torrents is an exception because some private // trackers may require the user agent - std::string const user_agent = settings.get_bool(settings_pack::anonymous_mode) - && !tracker_req().private_torrent ? "" : settings.get_str(settings_pack::user_agent); + bool const anon_user = settings.get_bool(settings_pack::anonymous_mode) + && !tracker_req().private_torrent; + std::string const user_agent = anon_user + ? "curl/7.81.0" + : settings.get_str(settings_pack::user_agent); // when sending stopped requests, prefer the cached DNS entry // to avoid being blocked for slow or failing responses. 
Chances diff --git a/src/mmap.cpp b/src/mmap.cpp index aecb6a31a7b..b927f035464 100644 --- a/src/mmap.cpp +++ b/src/mmap.cpp @@ -668,7 +668,9 @@ file_mapping::file_mapping(file_mapping&& rhs) void file_mapping::dont_need(span range) { - TORRENT_UNUSED(range); + auto* const start = const_cast(range.data()); + auto const size = static_cast(range.size()); + #if TORRENT_USE_MADVISE int const advise = 0 #if defined TORRENT_LINUX && defined MADV_COLD @@ -681,10 +683,34 @@ void file_mapping::dont_need(span range) ; if (advise) - madvise(const_cast(range.data()), static_cast(range.size()), advise); + ::madvise(start, size, advise); +#endif +#ifndef TORRENT_WINDOWS + ::msync(start, size, MS_INVALIDATE); +#else + TORRENT_UNUSED(start); + TORRENT_UNUSED(size); #endif } +void file_mapping::page_out(span range) +{ +#if TORRENT_HAVE_MAP_VIEW_OF_FILE + // ignore errors, this is best-effort + FlushViewOfFile(range.data(), static_cast(range.size())); +#else + + auto* const start = const_cast(range.data()); + auto const size = static_cast(range.size()); +#if TORRENT_USE_MADVISE && defined MADV_PAGEOUT + ::madvise(start, size, MADV_PAGEOUT); +#endif + + ::msync(start, size, MS_ASYNC); + +#endif // MAP_VIEW_OF_FILE +} + } // aux } // libtorrent diff --git a/src/mmap_disk_io.cpp b/src/mmap_disk_io.cpp index 5ff64f9a562..239d995e37f 100644 --- a/src/mmap_disk_io.cpp +++ b/src/mmap_disk_io.cpp @@ -129,7 +129,8 @@ namespace { return (flags & ~(disk_interface::force_copy | disk_interface::sequential_access | disk_interface::volatile_read - | disk_interface::v1_hash)) + | disk_interface::v1_hash + | disk_interface::flush_piece)) == disk_job_flags_t{}; } #endif @@ -141,7 +142,6 @@ using jobqueue_t = aux::tailqueue; // of disk io jobs struct TORRENT_EXTRA_EXPORT mmap_disk_io final : disk_interface - , buffer_allocator_interface { mmap_disk_io(io_context& ios, settings_interface const&, counters& cnt); #if TORRENT_USE_ASSERTS @@ -189,10 +189,6 @@ struct TORRENT_EXTRA_EXPORT mmap_disk_io final void async_clear_piece(storage_index_t storage, piece_index_t index , std::function handler) override; - // implements buffer_allocator_interface - void free_disk_buffer(char* b) override - { m_buffer_pool.free_buffer(b); } - void update_stats_counters(counters& c) const override; std::vector get_status(storage_index_t) const override; @@ -403,6 +399,7 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( void mmap_disk_io::remove_torrent(storage_index_t const idx) { + TORRENT_ASSERT(m_torrents[idx] != nullptr); m_torrents[idx].reset(); m_free_slots.add(idx); } @@ -414,6 +411,14 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( TORRENT_ASSERT(m_magic == 0x1337); m_magic = 0xdead; + // abort should have been triggered + TORRENT_ASSERT(m_abort); + + // there are not supposed to be any writes in-flight by now + TORRENT_ASSERT(m_store_buffer.size() == 0); + + // all torrents are supposed to have been removed by now + TORRENT_ASSERT(m_torrents.size() == m_free_slots.size()); TORRENT_ASSERT(m_generic_threads.num_threads() == 0); TORRENT_ASSERT(m_hash_threads.num_threads() == 0); if (!m_generic_io_jobs.m_queued_jobs.empty()) @@ -565,7 +570,7 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( iovec_t b = {a.buf.data() + a.buffer_offset, a.buffer_size}; int const ret = j->storage->readv(m_settings, b - , a.piece, a.offset, file_mode_for_job(j), j->error); + , a.piece, a.offset, file_mode_for_job(j), j->flags, j->error); TORRENT_ASSERT(ret >= 0 || j->error.ec); TORRENT_UNUSED(ret); @@ -585,7 +590,7 @@ 
TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( status_t mmap_disk_io::do_job(aux::job::read& a, aux::mmap_disk_job* j) { - a.buf = disk_buffer_holder(*this, m_buffer_pool.allocate_buffer("send buffer"), default_block_size); + a.buf = disk_buffer_holder(m_buffer_pool, m_buffer_pool.allocate_buffer("send buffer"), default_block_size); if (!a.buf) { j->error.ec = error::no_memory; @@ -599,7 +604,7 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( iovec_t b = {a.buf.data(), a.buffer_size}; int const ret = j->storage->readv(m_settings, b - , a.piece, a.offset, file_mode, j->error); + , a.piece, a.offset, file_mode, j->flags, j->error); TORRENT_ASSERT(ret >= 0 || j->error.ec); TORRENT_UNUSED(ret); @@ -629,7 +634,7 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( // the actual write operation int const ret = j->storage->writev(m_settings, b - , a.piece, a.offset, file_mode, j->error); + , a.piece, a.offset, file_mode, j->flags, j->error); m_stats_counters.inc_stats_counter(counters::num_writing_threads, -1); @@ -702,7 +707,9 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( int const ret = m_store_buffer.get2(loc1, loc2, [&](char const* buf1, char const* buf2) { - buffer = disk_buffer_holder(*this, m_buffer_pool.allocate_buffer("send buffer"), r.length); + buffer = disk_buffer_holder(m_buffer_pool + , m_buffer_pool.allocate_buffer("send buffer") + , r.length); if (!buffer) { ec.ec = error::no_memory; @@ -764,7 +771,7 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( { if (m_store_buffer.get({ storage, r.piece, block_offset }, [&](char const* buf) { - buffer = disk_buffer_holder(*this, m_buffer_pool.allocate_buffer("send buffer"), r.length); + buffer = disk_buffer_holder(m_buffer_pool, m_buffer_pool.allocate_buffer("send buffer"), r.length); if (!buffer) { ec.ec = error::no_memory; @@ -811,7 +818,7 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( { TORRENT_ASSERT(valid_flags(flags)); bool exceeded = false; - disk_buffer_holder buffer(*this, m_buffer_pool.allocate_buffer( + disk_buffer_holder buffer(m_buffer_pool, m_buffer_pool.allocate_buffer( exceeded, o, "receive buffer"), default_block_size); if (!buffer) aux::throw_ex(); std::memcpy(buffer.data(), buf, aux::numeric_cast(r.length)); @@ -1075,9 +1082,12 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( { if (v1) { + // if we will call hashv2() in a bit, don't trigger a flush + // just yet, let hashv2() do it + auto const flags = v2_block ? (j->flags & ~disk_interface::flush_piece) : j->flags; j->error.ec.clear(); ret = j->storage->hashv(m_settings, h, len, a.piece, offset - , file_mode, j->flags, j->error); + , file_mode, flags, j->error); if (ret < 0) break; } if (v2_block) @@ -1207,8 +1217,8 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( TORRENT_ASSERT(j->storage->files().piece_length() > 0); // always initialize the storage - j->storage->initialize(m_settings, j->error); - if (j->error) return status_t::fatal_disk_error; + auto const ret_flag = j->storage->initialize(m_settings, j->error); + if (j->error) return status_t::fatal_disk_error | ret_flag; // we must call verify_resume() unconditionally of the setting below, in // order to set up the links (if present) @@ -1220,21 +1230,23 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( // as they succeed. 
if (m_settings.get_bool(settings_pack::no_recheck_incomplete_resume)) - return status_t::no_error; + return status_t::no_error | ret_flag; if (!aux::contains_resume_data(*rd)) { // if we don't have any resume data, we still may need to trigger a // full re-check, if there are *any* files. storage_error ignore; - return (j->storage->has_any_file(ignore)) + return ((j->storage->has_any_file(ignore)) ? status_t::need_full_check - : status_t::no_error; + : status_t::no_error) + | ret_flag; } - return verify_success + return (verify_success ? status_t::no_error - : status_t::need_full_check; + : status_t::need_full_check) + | ret_flag; } status_t mmap_disk_io::do_job(aux::job::rename_file& a, aux::mmap_disk_job* j) @@ -1515,10 +1527,10 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( struct MPI { ULONG MemoryPriority; }; -#ifndef MEMORY_PRIORITY_BELOW_NORMAL - ULONG const MEMORY_PRIORITY_BELOW_NORMAL = 4; +#ifndef MEMORY_PRIORITY_LOW + ULONG const MEMORY_PRIORITY_LOW = 2; #endif - MPI info{MEMORY_PRIORITY_BELOW_NORMAL}; + MPI info{MEMORY_PRIORITY_LOW}; SetThreadInformation(GetCurrentThread(), ThreadMemoryPriority , &info, sizeof(info)); } @@ -1745,18 +1757,20 @@ TORRENT_EXPORT std::unique_ptr mmap_disk_io_constructor( completed.push_back(j); } } - - if (!new_jobs.empty()) + else { + if (!new_jobs.empty()) { - std::lock_guard l(m_job_mutex); - m_generic_io_jobs.m_queued_jobs.append(new_jobs); - } + { + std::lock_guard l(m_job_mutex); + m_generic_io_jobs.m_queued_jobs.append(new_jobs); + } - { - std::lock_guard l(m_job_mutex); - m_generic_io_jobs.m_job_cond.notify_all(); - m_generic_threads.job_queued(m_generic_io_jobs.m_queued_jobs.size()); + { + std::lock_guard l(m_job_mutex); + m_generic_io_jobs.m_job_cond.notify_all(); + m_generic_threads.job_queued(m_generic_io_jobs.m_queued_jobs.size()); + } } } diff --git a/src/mmap_storage.cpp b/src/mmap_storage.cpp index 93fb00d1c03..24a717bcaca 100644 --- a/src/mmap_storage.cpp +++ b/src/mmap_storage.cpp @@ -186,7 +186,7 @@ error_code translate_error(std::system_error const& err, bool const write) // so we just don't use a partfile for this file std::string const fp = fs.file_path(i, m_save_path); - if (exists(fp, ec.ec)) use_partfile(i, false); + bool const file_exists = exists(fp, ec.ec); if (ec.ec) { ec.file(i); @@ -194,6 +194,7 @@ error_code translate_error(std::system_error const& err, bool const write) prio = m_file_priority; return; } + use_partfile(i, !file_exists); /* auto f = open_file(sett, i, aux::open_mode::read_only, ec); if (ec.ec != boost::system::errc::no_such_file_or_directory) @@ -252,11 +253,17 @@ error_code translate_error(std::system_error const& err, bool const write) void mmap_storage::use_partfile(file_index_t const index, bool const b) { - if (index >= m_use_partfile.end_index()) m_use_partfile.resize(static_cast(index) + 1, true); + if (index >= m_use_partfile.end_index()) + { + // no need to extend this array if we're just setting it to "true", + // that's default already + if (b) return; + m_use_partfile.resize(static_cast(index) + 1, true); + } m_use_partfile[index] = b; } - void mmap_storage::initialize(settings_interface const& sett, storage_error& ec) + status_t mmap_storage::initialize(settings_interface const& sett, storage_error& ec) { m_stat_cache.reserve(files().num_files()); @@ -274,6 +281,7 @@ error_code translate_error(std::system_error const& err, bool const write) } file_storage const& fs = files(); + status_t ret{}; // if some files have priority 0, we need to check if they exist on the // 
filesystem, in which case we won't use a partfile for them. // this is to be backwards compatible with previous versions of @@ -283,16 +291,20 @@ error_code translate_error(std::system_error const& err, bool const write) if (m_file_priority[i] != dont_download || fs.pad_file_at(i)) continue; - file_status s; - std::string const file_path = fs.file_path(i, m_save_path); error_code err; - stat_file(file_path, &s, err); - if (!err) + auto const size = m_stat_cache.get_filesize(i, fs, m_save_path, err); + if (!err && size > 0) { use_partfile(i, false); + if (size > fs.file_size(i)) + ret = ret | status_t::oversized_file; } else { + // we may have earlier determined we *can't* use a partfile for + // this file, we need to be able to change our mind in case the + // file disappeared + use_partfile(i, true); need_partfile(); } } @@ -301,10 +313,12 @@ error_code translate_error(std::system_error const& err, bool const write) , [&sett, this](file_index_t const file_index, storage_error& e) { open_file(sett, file_index, aux::open_mode::write, e); } , aux::create_symlink + , [&ret](file_index_t, std::int64_t) { ret = ret | status_t::oversized_file; } , ec); // close files that were opened in write mode m_pool.release(storage_index()); + return ret; } bool mmap_storage::has_any_file(storage_error& ec) @@ -321,14 +335,13 @@ error_code translate_error(std::system_error const& err, bool const write) if (!ec) return true; // the part file not existing is expected - if (ec && ec.ec == boost::system::errc::no_such_file_or_directory) + if (ec.ec == boost::system::errc::no_such_file_or_directory) ec.ec.clear(); if (ec) { ec.file(torrent_status::error_file_partfile); ec.operation = operation_t::file_stat; - return false; } return false; } @@ -464,13 +477,15 @@ error_code translate_error(std::system_error const& err, bool const write) int mmap_storage::readv(settings_interface const& sett , span bufs , piece_index_t const piece, int const offset - , aux::open_mode_t const mode, storage_error& error) + , aux::open_mode_t const mode + , disk_job_flags_t const flags + , storage_error& error) { #ifdef TORRENT_SIMULATE_SLOW_READ std::this_thread::sleep_for(seconds(1)); #endif return readwritev(files(), bufs, piece, offset, error - , [this, mode, &sett](file_index_t const file_index + , [this, mode, flags, &sett](file_index_t const file_index , std::int64_t const file_offset , span vec, storage_error& ec) { @@ -526,6 +541,10 @@ error_code translate_error(std::system_error const& err, bool const write) file_range = file_range.subspan(buf.size()); ret += static_cast(buf.size()); } + if (flags & disk_interface::volatile_read) + handle->dont_need(file_range); + if (flags & disk_interface::flush_piece) + handle->page_out(file_range); } } catch (std::system_error const& err) @@ -553,10 +572,12 @@ error_code translate_error(std::system_error const& err, bool const write) int mmap_storage::writev(settings_interface const& sett , span bufs , piece_index_t const piece, int const offset - , aux::open_mode_t const mode, storage_error& error) + , aux::open_mode_t const mode + , disk_job_flags_t const flags + , storage_error& error) { return readwritev(files(), bufs, piece, offset, error - , [this, mode, &sett](file_index_t const file_index + , [this, mode, flags, &sett](file_index_t const file_index , std::int64_t const file_offset , span vec, storage_error& ec) { @@ -616,6 +637,11 @@ error_code translate_error(std::system_error const& err, bool const write) file_range = file_range.subspan(buf.size()); ret += 
static_cast(buf.size()); } + + if (flags & disk_interface::volatile_read) + handle->dont_need(file_range); + if (flags & disk_interface::flush_piece) + handle->page_out(file_range); } catch (std::system_error const& err) { @@ -707,6 +733,8 @@ error_code translate_error(std::system_error const& err, bool const write) ret += static_cast(file_range.size()); if (flags & disk_interface::volatile_read) handle->dont_need(file_range); + if (flags & disk_interface::flush_piece) + handle->page_out(file_range); } return ret; @@ -756,6 +784,8 @@ error_code translate_error(std::system_error const& err, bool const write) ph.update(file_range); if (flags & disk_interface::volatile_read) handle->dont_need(file_range); + if (flags & disk_interface::flush_piece) + handle->page_out(file_range); return static_cast(file_range.size()); } @@ -793,7 +823,14 @@ error_code translate_error(std::system_error const& err, bool const write) std::optional h = open_file_impl(sett, file, mode, ec); if ((mode & aux::open_mode::write) - && ec.ec == boost::system::errc::no_such_file_or_directory) + && (ec.ec == boost::system::errc::no_such_file_or_directory +#ifdef TORRENT_WINDOWS + // this is a workaround for improper handling of files on windows shared drives. + // if the directory on a shared drive does not exist, + // windows returns ERROR_IO_DEVICE instead of ERROR_FILE_NOT_FOUND + || ec.ec == error_code(ERROR_IO_DEVICE, system_category()) +#endif + )) { // this means the directory the file is in doesn't exist. // so create it @@ -850,8 +887,9 @@ error_code translate_error(std::system_error const& err, bool const write) } // if we have a cache already, don't store the data twice by leaving it in the OS cache as well - if (sett.get_int(settings_pack::disk_io_write_mode) - == settings_pack::disable_os_cache) + auto const write_mode = sett.get_int(settings_pack::disk_io_write_mode); + if (write_mode == settings_pack::disable_os_cache + || write_mode == settings_pack::write_through) { mode |= aux::open_mode::no_cache; } diff --git a/src/path.cpp b/src/path.cpp index a255b778cb1..485720f89e2 100644 --- a/src/path.cpp +++ b/src/path.cpp @@ -78,19 +78,8 @@ see LICENSE file. #include #include #include - -#ifdef TORRENT_LINUX -// linux specifics - #include -#elif defined __APPLE__ && defined __MACH__ && MAC_OS_X_VERSION_MIN_REQUIRED >= 1050 -// mac specifics - -#include - -#endif - #endif // posix part #include "libtorrent/aux_/disable_warnings_pop.hpp" @@ -452,88 +441,6 @@ namespace { return false; } - void recursive_copy(std::string const& old_path, std::string const& new_path, error_code& ec) - { - TORRENT_ASSERT(!ec); - if (is_directory(old_path, ec)) - { - create_directory(new_path, ec); - if (ec) return; - for (aux::directory i(old_path, ec); !i.done(); i.next(ec)) - { - std::string f = i.file(); - if (f == ".." 
|| f == ".") continue; - recursive_copy(combine_path(old_path, f), combine_path(new_path, f), ec); - if (ec) return; - } - } - else if (!ec) - { - copy_file(old_path, new_path, ec); - } - } - - void copy_file(std::string const& inf, std::string const& newf, error_code& ec) - { - ec.clear(); - native_path_string f1 = convert_to_native_path_string(inf); - native_path_string f2 = convert_to_native_path_string(newf); - -#ifdef TORRENT_WINDOWS - - if (CopyFileW(f1.c_str(), f2.c_str(), false) == 0) - ec.assign(GetLastError(), system_category()); - -#elif defined __APPLE__ && defined __MACH__ && MAC_OS_X_VERSION_MIN_REQUIRED >= 1050 - // this only works on 10.5 - copyfile_state_t state = copyfile_state_alloc(); - if (copyfile(f1.c_str(), f2.c_str(), state, COPYFILE_ALL) < 0) - ec.assign(errno, system_category()); - copyfile_state_free(state); -#else - int const infd = ::open(f1.c_str(), O_RDONLY); - if (infd < 0) - { - ec.assign(errno, system_category()); - return; - } - - // rely on default umask to filter x and w permissions - // for group and others - mode_t const permissions = S_IRUSR | S_IWUSR - | S_IRGRP | S_IWGRP - | S_IROTH | S_IWOTH; - - int const outfd = ::open(f2.c_str(), O_WRONLY | O_CREAT, permissions); - if (outfd < 0) - { - close(infd); - ec.assign(errno, system_category()); - return; - } - char buffer[4096]; - for (;;) - { - int const num_read = int(read(infd, buffer, sizeof(buffer))); - if (num_read == 0) break; - if (num_read < 0) - { - ec.assign(errno, system_category()); - break; - } - int const num_written = int(write(outfd, buffer, std::size_t(num_read))); - if (num_written < num_read) - { - ec.assign(errno, system_category()); - break; - } - if (num_read < int(sizeof(buffer))) break; - } - close(infd); - close(outfd); -#endif // TORRENT_WINDOWS - } - void move_file(std::string const& inf, std::string const& newf, error_code& ec) { ec.clear(); diff --git a/src/posix_disk_io.cpp b/src/posix_disk_io.cpp index e7659e59682..e95c0b92c03 100644 --- a/src/posix_disk_io.cpp +++ b/src/posix_disk_io.cpp @@ -37,7 +37,6 @@ namespace { struct TORRENT_EXTRA_EXPORT posix_disk_io final : disk_interface - , buffer_allocator_interface { posix_disk_io(io_context& ios, settings_interface const& sett, counters& cnt) : m_settings(sett) @@ -77,13 +76,15 @@ namespace { , std::function handler , disk_job_flags_t) override { - disk_buffer_holder buffer = disk_buffer_holder(*this, m_buffer_pool.allocate_buffer("send buffer"), default_block_size); + disk_buffer_holder buffer = disk_buffer_holder(m_buffer_pool + , m_buffer_pool.allocate_buffer("send buffer") + , default_block_size); storage_error error; if (!buffer) { error.ec = errors::no_memory; error.operation = operation_t::alloc_cache_piece; - post(m_ios, [=, h = std::move(handler)]{ h(disk_buffer_holder(*this, nullptr, 0), error); }); + post(m_ios, [=, h = std::move(handler)]{ h(disk_buffer_holder(m_buffer_pool, nullptr, 0), error); }); return; } @@ -145,7 +146,7 @@ namespace { bool const v1 = bool(flags & disk_interface::v1_hash); bool const v2 = !block_hashes.empty(); - disk_buffer_holder buffer = disk_buffer_holder(*this, m_buffer_pool.allocate_buffer("hash buffer"), default_block_size); + disk_buffer_holder buffer = disk_buffer_holder(m_buffer_pool, m_buffer_pool.allocate_buffer("hash buffer"), default_block_size); storage_error error; if (!buffer) { @@ -205,7 +206,7 @@ namespace { { time_point const start_time = clock_type::now(); - disk_buffer_holder buffer = disk_buffer_holder(*this, m_buffer_pool.allocate_buffer("hash buffer"), 0x4000); + 
disk_buffer_holder buffer = disk_buffer_holder(m_buffer_pool, m_buffer_pool.allocate_buffer("hash buffer"), 0x4000); storage_error error; if (!buffer) { @@ -285,28 +286,30 @@ namespace { storage_error error; status_t const ret = [&] { - st->initialize(m_settings, error); - if (error) return status_t::fatal_disk_error; + auto const ret_flag = st->initialize(m_settings, error); + if (error) return status_t::fatal_disk_error | ret_flag; bool const verify_success = st->verify_resume_data(*rd , std::move(links), error); if (m_settings.get_bool(settings_pack::no_recheck_incomplete_resume)) - return status_t::no_error; + return status_t::no_error | ret_flag; if (!aux::contains_resume_data(*rd)) { // if we don't have any resume data, we still may need to trigger a // full re-check, if there are *any* files. storage_error ignore; - return (st->has_any_file(ignore)) + return ((st->has_any_file(ignore)) ? status_t::need_full_check - : status_t::no_error; + : status_t::no_error) + | ret_flag; } - return verify_success + return (verify_success ? status_t::no_error - : status_t::need_full_check; + : status_t::need_full_check) + | ret_flag; }(); post(m_ios, [error, ret, h = std::move(handler)]{ h(ret, error); }); @@ -348,10 +351,6 @@ namespace { post(m_ios, [=, h = std::move(handler)]{ h(index); }); } - // implements buffer_allocator_interface - void free_disk_buffer(char* b) override - { m_buffer_pool.free_buffer(b); } - void update_stats_counters(counters&) const override {} std::vector get_status(storage_index_t) const override diff --git a/src/posix_storage.cpp b/src/posix_storage.cpp index 65ca61e225f..da7466b848a 100644 --- a/src/posix_storage.cpp +++ b/src/posix_storage.cpp @@ -109,7 +109,7 @@ namespace aux { // so we just don't use a partfile for this file std::string const fp = fs.file_path(i, m_save_path); - if (exists(fp, ec.ec)) use_partfile(i, false); + bool const file_exists = exists(fp, ec.ec); if (ec.ec) { ec.file(i); @@ -117,6 +117,7 @@ namespace aux { prio = m_file_priority; return; } + use_partfile(i, !file_exists); } ec.ec.clear(); m_file_priority[i] = new_prio; @@ -379,7 +380,7 @@ namespace aux { m_mapped_files->rename_file(index, new_filename); } - void posix_storage::initialize(settings_interface const&, storage_error& ec) + status_t posix_storage::initialize(settings_interface const&, storage_error& ec) { m_stat_cache.reserve(files().num_files()); @@ -388,6 +389,7 @@ namespace aux { // filesystem, in which case we won't use a partfile for them. // this is to be backwards compatible with previous versions of // libtorrent, when part files were not supported. 
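
Both disk back-ends now fold the result of initialize() into the value handed to the check-files completion handler, so status_t behaves as a small flag set rather than a plain enum. Any caller comparing the status against the enumerated completion codes has to strip the new oversized_file bit first. A minimal sketch of that masking convention, using only names that appear in this patch (the same pattern torrent.cpp applies further down):

#include "libtorrent/disk_interface.hpp" // brings in status_t

void handle_check_files(lt::status_t status)
{
	if ((status & lt::status_t::oversized_file) != lt::status_t{})
	{
		// one or more files on disk are larger than the torrent describes;
		// clear the bit before comparing against the completion codes
		status = status & ~lt::status_t::oversized_file;
	}

	if (status == lt::status_t::fatal_disk_error) { /* give up */ }
	else if (status == lt::status_t::need_full_check) { /* queue a re-hash */ }
	else { /* no_error: resume data accepted */ }
}
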
+ status_t ret{}; for (file_index_t i(0); i < m_file_priority.end_index(); ++i) { if (m_file_priority[i] != dont_download || fs.pad_file_at(i)) @@ -397,6 +399,10 @@ namespace aux { std::string const file_path = fs.file_path(i, m_save_path); error_code err; stat_file(file_path, &s, err); + + if (s.file_size > fs.file_size(i)) + ret = ret | status_t::oversized_file; + if (!err) { use_partfile(i, false); @@ -411,7 +417,9 @@ namespace aux { , [this](file_index_t const file_index, storage_error& e) { open_file(file_index, aux::open_mode::write, 0, e); } , aux::create_symlink + , [&ret](file_index_t, std::int64_t) { ret = ret | status_t::oversized_file; } , ec); + return ret; } file_pointer posix_storage::open_file(file_index_t idx, open_mode_t const mode @@ -438,8 +446,15 @@ namespace aux { // if we fail to open a file for writing, and the error is ENOENT, // it is likely because the directory we're creating the file in // does not exist. Create the directory and try again. - if ((mode & open_mode::write) - && ec.ec == boost::system::errc::no_such_file_or_directory) + if ((mode & aux::open_mode::write) + && (ec.ec == boost::system::errc::no_such_file_or_directory +#ifdef TORRENT_WINDOWS + // this is a workaround for improper handling of files on windows shared drives. + // if the directory on a shared drive does not exist, + // windows returns ERROR_IO_DEVICE instead of ERROR_FILE_NOT_FOUND + || ec.ec == error_code(ERROR_IO_DEVICE, system_category()) +#endif + )) { // this means the directory the file is in doesn't exist. // so create it @@ -501,7 +516,13 @@ namespace aux { void posix_storage::use_partfile(file_index_t const index, bool const b) { - if (index >= m_use_partfile.end_index()) m_use_partfile.resize(static_cast(index) + 1, true); + if (index >= m_use_partfile.end_index()) + { + // no need to extend this array if we're just setting it to "true", + // that's default already + if (b) return; + m_use_partfile.resize(static_cast(index) + 1, true); + } m_use_partfile[index] = b; } diff --git a/src/session_impl.cpp b/src/session_impl.cpp index 5759aa5b0a1..ee16a4893ce 100644 --- a/src/session_impl.cpp +++ b/src/session_impl.cpp @@ -6163,6 +6163,10 @@ namespace { } m_torrents.clear(); + // this has probably been called already, but in case of sudden + // termination through an exception, it may not have been done + abort_stage2(); + #if defined TORRENT_ASIO_DEBUGGING FILE* f = fopen("wakeups.log", "w+"); if (f != nullptr) diff --git a/src/settings_pack.cpp b/src/settings_pack.cpp index de86e379f34..8c4745193f7 100644 --- a/src/settings_pack.cpp +++ b/src/settings_pack.cpp @@ -94,6 +94,12 @@ namespace libtorrent { constexpr int CLOSE_FILE_INTERVAL = 240; #else constexpr int CLOSE_FILE_INTERVAL = 0; +#endif + +#ifdef TORRENT_WINDOWS +constexpr int DISK_WRITE_MODE = settings_pack::write_through; +#else +constexpr int DISK_WRITE_MODE = settings_pack::enable_os_cache; #endif // tested to fail with _MSC_VER <= 1916. 
The actual version condition @@ -253,7 +259,7 @@ constexpr int CLOSE_FILE_INTERVAL = 0; DEPRECATED_SET(cache_size, 2048, nullptr), DEPRECATED_SET(cache_buffer_chunk_size, 0, nullptr), DEPRECATED_SET(cache_expiry, 300, nullptr), - SET(disk_io_write_mode, settings_pack::enable_os_cache, nullptr), + SET(disk_io_write_mode, DISK_WRITE_MODE, nullptr), SET(disk_io_read_mode, settings_pack::enable_os_cache, nullptr), SET(outgoing_port, 0, nullptr), SET(num_outgoing_ports, 0, nullptr), diff --git a/src/storage_utils.cpp b/src/storage_utils.cpp index 14d407745e8..e6eeb6d225e 100644 --- a/src/storage_utils.cpp +++ b/src/storage_utils.cpp @@ -667,6 +667,7 @@ std::int64_t get_filesize(stat_cache& stat, file_index_t const file_index , aux::vector const& file_priority , std::function create_file , std::function create_link + , std::function oversized_file , storage_error& ec) { // create zero-sized files @@ -684,7 +685,7 @@ std::int64_t get_filesize(stat_cache& stat, file_index_t const file_index // this is just to see if the file exists error_code err; - sc.get_filesize(file_index, fs, save_path, err); + auto const sz = sc.get_filesize(file_index, fs, save_path, err); if (err && err != boost::system::errc::no_such_file_or_directory) { @@ -694,11 +695,18 @@ std::int64_t get_filesize(stat_cache& stat, file_index_t const file_index break; } + auto const fs_file_size = fs.file_size(file_index); + if (!err && sz > fs_file_size) + { + // this file is oversized, alert the client + oversized_file(file_index, sz); + } + // if the file is empty and doesn't already exist, create it // deliberately don't truncate files that already exist // if a file is supposed to have size 0, but already exists, we will // never truncate it to 0. - if (fs.file_size(file_index) == 0) + if (fs_file_size == 0) { // create symlinks if (fs.file_flags(file_index) & file_storage::flag_symlink) diff --git a/src/torrent.cpp b/src/torrent.cpp index ff4e3bdeb27..948877d4c25 100644 --- a/src/torrent.cpp +++ b/src/torrent.cpp @@ -739,6 +739,11 @@ bool is_downloading_state(int const st) rp->blocks_left = blocks_in_piece; rp->fail = false; + disk_job_flags_t flags{}; + auto const read_mode = settings().get_int(settings_pack::disk_io_read_mode); + if (read_mode == settings_pack::disable_os_cache) + flags |= disk_interface::volatile_read; + peer_request r; r.piece = piece; r.start = 0; @@ -748,7 +753,8 @@ bool is_downloading_state(int const st) r.length = std::min(piece_size - r.start, block_size()); m_ses.disk_thread().async_read(m_storage, r , [self, r, rp](disk_buffer_holder block, storage_error const& se) mutable - { self->on_disk_read_complete(std::move(block), se, r, rp); }); + { self->on_disk_read_complete(std::move(block), se, r, rp); } + , flags); } m_ses.deferred_submit_jobs(); } @@ -1317,8 +1323,16 @@ bool is_downloading_state(int const st) p.length = std::min(piece_size - p.start, block_size()); m_stats_counters.inc_stats_counter(counters::queued_write_bytes, p.length); + + disk_job_flags_t dflags{}; + + auto const write_mode = settings().get_int(settings_pack::disk_io_write_mode); + if (write_mode == settings_pack::disable_os_cache) + dflags |= disk_interface::flush_piece | disk_interface::volatile_read; + m_ses.disk_thread().async_write(m_storage, p, data + p.start, nullptr - , [self, p](storage_error const& error) { self->on_disk_write_complete(error, p); }); + , [self, p](storage_error const& error) { self->on_disk_write_complete(error, p); } + , dflags); bool const was_finished = picker().is_piece_finished(p.piece); bool const 
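
With the Windows default moving to the new write_through mode and the write path translating disk_io_write_mode into per-job flush flags, other platforms can opt in through the regular settings_pack route. A small sketch of doing that from client code; this uses the public session API and is not part of the patch itself:

#include <libtorrent/session.hpp>
#include <libtorrent/session_params.hpp>
#include <libtorrent/settings_pack.hpp>

int main()
{
	lt::settings_pack p;
	p.set_int(lt::settings_pack::disk_io_write_mode
		, lt::settings_pack::write_through);
	lt::session ses{lt::session_params(std::move(p))};
	// ... add torrents; completed pieces are flushed to disk immediately
	// instead of lingering in the OS page cache
}
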
multi = picker().num_peers(block) > 1; @@ -1968,7 +1982,7 @@ bool is_downloading_state(int const st) return m_outgoing_pids.count(pid) > 0; } - void torrent::on_resume_data_checked(status_t const status + void torrent::on_resume_data_checked(status_t status , storage_error const& error) try { #if TORRENT_USE_ASSERTS @@ -1988,6 +2002,14 @@ bool is_downloading_state(int const st) if (m_abort) return; + if ((status & status_t::oversized_file) != status_t{}) + { + // clear the flag + status = status & ~status_t::oversized_file; + if (m_ses.alerts().should_post()) + m_ses.alerts().emplace_alert(get_handle()); + } + if (status == status_t::fatal_disk_error) { TORRENT_ASSERT(m_outstanding_check_files == false); @@ -2263,7 +2285,7 @@ bool is_downloading_state(int const st) m_ses.deferred_submit_jobs(); } - void torrent::on_force_recheck(status_t const status, storage_error const& error) try + void torrent::on_force_recheck(status_t status, storage_error const& error) try { TORRENT_ASSERT(is_single_thread()); @@ -2272,6 +2294,14 @@ bool is_downloading_state(int const st) if (m_abort) return; + if ((status & status_t::oversized_file) != status_t{}) + { + // clear the flag + status = status & ~status_t::oversized_file; + if (m_ses.alerts().should_post()) + m_ses.alerts().emplace_alert(get_handle()); + } + if (error) { handle_disk_error("force_recheck", error); @@ -11105,9 +11135,17 @@ namespace { TORRENT_ASSERT(m_storage); TORRENT_ASSERT(!m_picker->is_hashing(piece)); - disk_job_flags_t flags; + // we just completed the piece, it should be flushed to disk + disk_job_flags_t flags{}; + + auto const write_mode = settings().get_int(settings_pack::disk_io_write_mode); + if (write_mode == settings_pack::write_through) + flags |= disk_interface::flush_piece; + else if (write_mode == settings_pack::disable_os_cache) + flags |= disk_interface::flush_piece | disk_interface::volatile_read; if (torrent_file().info_hashes().has_v1()) flags |= disk_interface::v1_hash; + aux::vector hashes; if (torrent_file().info_hashes().has_v2()) { diff --git a/src/truncate.cpp b/src/truncate.cpp new file mode 100644 index 00000000000..fe23e864466 --- /dev/null +++ b/src/truncate.cpp @@ -0,0 +1,177 @@ +/* + +Copyright (c) 2022, Arvid Norberg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include "libtorrent/truncate.hpp" +#include "libtorrent/file_storage.hpp" +#include "libtorrent/aux_/path.hpp" +#include "libtorrent/operations.hpp" + +#ifdef TORRENT_WINDOWS +#include "libtorrent/aux_/windows.hpp" +#else +#include +#endif + +namespace libtorrent { + +#ifdef TORRENT_WINDOWS + +void truncate_files(file_storage const& fs, std::string const& save_path, storage_error& ec) +{ + for (auto i : fs.file_range()) + { + if (fs.pad_file_at(i)) continue; + auto const fn = fs.file_path(i, save_path); + native_path_string const file_path = convert_to_native_path_string(fn); +#ifdef TORRENT_WINRT + HANDLE handle = CreateFile2(file_path.c_str() + , GENERIC_WRITE | GENERIC_READ + , FILE_SHARE_READ | FILE_SHARE_WRITE + , OPEN_EXISTING + , nullptr); +#else + HANDLE handle = CreateFileW(file_path.c_str() + , GENERIC_WRITE | GENERIC_READ + , FILE_SHARE_READ | FILE_SHARE_WRITE + , nullptr + , OPEN_EXISTING + , 0 + , nullptr); +#endif + if (handle == INVALID_HANDLE_VALUE) + { + auto const error = ::GetLastError(); + if (error != ERROR_FILE_NOT_FOUND) + { + ec.ec.assign(error, system_category()); + ec.file(i); + ec.operation = operation_t::file_open; + return; + } + continue; + } + + LARGE_INTEGER file_size; + if (GetFileSizeEx(handle, &file_size) == FALSE) + { + ec.ec.assign(::GetLastError(), system_category()); + ec.file(i); + ec.operation = operation_t::file_stat; + ::CloseHandle(handle); + return; + } + + if (file_size.QuadPart < fs.file_size(i)) + { + ::CloseHandle(handle); + continue; + } + + LARGE_INTEGER sz; + sz.QuadPart = fs.file_size(i); + if (SetFilePointerEx(handle, sz, nullptr, FILE_BEGIN) == FALSE) + { + ec.ec.assign(::GetLastError(), system_category()); + ec.file(i); + ec.operation = operation_t::file_seek; + ::CloseHandle(handle); + return; + } + + if (::SetEndOfFile(handle) == FALSE) + { + ec.ec.assign(::GetLastError(), system_category()); + ec.file(i); + ec.operation = operation_t::file_truncate; + ::CloseHandle(handle); + return; + } + ::CloseHandle(handle); + } +} + +#else + +void truncate_files(file_storage const& fs, std::string const& save_path, storage_error& ec) +{ + for (auto i : fs.file_range()) + { + if (fs.pad_file_at(i)) continue; + auto const fn = fs.file_path(i, save_path); + native_path_string const file_path = convert_to_native_path_string(fn); + int const fd = ::open(file_path.c_str(), O_RDWR); + + if (fd < 0) + { + int const error = errno; + if (error != ENOENT) + { + ec.ec.assign(error, generic_category()); + ec.file(i); + ec.operation = operation_t::file_open; + return; + } + continue; + } + + struct ::stat st; + if (::fstat(fd, &st) != 0) + { + ec.ec.assign(errno, system_category()); + ec.file(i); + ec.operation = operation_t::file_stat; + ::close(fd); + return; + } + + if (st.st_size < fs.file_size(i)) + { + ::close(fd); + continue; + } + + if (::ftruncate(fd, static_cast(fs.file_size(i))) < 0) + { + ec.ec.assign(errno, system_category()); + ec.file(i); + ec.operation = operation_t::file_truncate; + ::close(fd); + return; + } + + 
::close(fd); + } +} + +#endif + +} diff --git a/test/Jamfile b/test/Jamfile index 25248e67898..e2bd6246d3b 100644 --- a/test/Jamfile +++ b/test/Jamfile @@ -63,14 +63,14 @@ project libtorrent_test /torrent//torrent # C4127: conditional expression is constant - msvc:/wd4127 + msvc:/wd4127 # C4309: 'conversion' : truncation of constant value - msvc:/wd4309 + msvc:/wd4309 # C4310: cast truncates constant value - msvc:/wd4310 + msvc:/wd4310 # C4268: 'identifier' : 'const' static/global data initialized # with compiler generated default constructor fills the object with zeros - msvc:/wd4268 + msvc:/wd4268 @warnings on : default-build @@ -198,6 +198,8 @@ run test_hash_picker.cpp ; run test_torrent.cpp ; run test_remap_files.cpp ; run test_similar_torrent.cpp ; +run test_truncate.cpp ; +run test_copy_file.cpp ; # turn these tests into simulations run test_resume.cpp ; @@ -316,5 +318,6 @@ alias deterministic-tests : test_xml test_store_buffer test_similar_torrent + test_truncate test_vector_utils ; diff --git a/test/main.cpp b/test/main.cpp index c34674acfac..c3050567cb4 100644 --- a/test/main.cpp +++ b/test/main.cpp @@ -17,7 +17,7 @@ see LICENSE file. #include // for exit() #include "libtorrent/address.hpp" #include "libtorrent/socket.hpp" -#include "setup_transfer.hpp" // for _g_test_failures +#include "setup_transfer.hpp" // for unit_test::g_test_failures #include "test.hpp" #include "dht_server.hpp" // for stop_dht #include "peer_server.hpp" // for stop_peer @@ -64,7 +64,7 @@ bool redirect_stderr = false; bool keep_files = false; // the current tests file descriptor -unit_test_t* current_test = nullptr; +unit_test::unit_test_t* current_test = nullptr; void output_test_log_to_terminal() { @@ -275,6 +275,8 @@ struct unit_directory_guard std::string dir; }; +namespace unit_test { + void EXPORT reset_output() { if (current_test == nullptr || current_test->output == nullptr) return; @@ -293,6 +295,8 @@ void EXPORT reset_output() } } +} + int EXPORT main(int argc, char const* argv[]) { char const* executable = argv[0]; @@ -312,9 +316,9 @@ int EXPORT main(int argc, char const* argv[]) if (argv[0] == "-l"_sv || argv[0] == "--list"_sv) { std::printf("TESTS:\n"); - for (int i = 0; i < _g_num_unit_tests; ++i) + for (int i = 0; i < ::unit_test::g_num_unit_tests; ++i) { - std::printf(" - %s\n", _g_unit_tests[i].name); + std::printf(" - %s\n", ::unit_test::g_unit_tests[i].name); } return 0; } @@ -396,7 +400,7 @@ int EXPORT main(int argc, char const* argv[]) std::string const unit_dir_prefix = combine_path(root_dir, "test_tmp_" + std::to_string(process_id) + "_"); std::printf("test: %s\ncwd_prefix = \"%s\"\n", executable, unit_dir_prefix.c_str()); - if (_g_num_unit_tests == 0) + if (unit_test::g_num_unit_tests == 0) { std::printf("\x1b[31mTEST_ERROR: no unit tests registered\x1b[0m\n"); return 1; @@ -406,9 +410,9 @@ int EXPORT main(int argc, char const* argv[]) if (redirect_stderr) old_stderr = dup(fileno(stderr)); int num_run = 0; - for (int i = 0; i < _g_num_unit_tests; ++i) + for (int i = 0; i < unit_test::g_num_unit_tests; ++i) { - if (filter && tests_to_run.count(_g_unit_tests[i].name) == 0) + if (filter && tests_to_run.count(unit_test::g_unit_tests[i].name) == 0) continue; std::string const unit_dir = unit_dir_prefix + std::to_string(i); @@ -429,7 +433,7 @@ int EXPORT main(int argc, char const* argv[]) return 1; } - unit_test_t& t = _g_unit_tests[i]; + auto& t = ::unit_test::g_unit_tests[i]; if (redirect_stdout || redirect_stderr) { @@ -482,7 +486,7 @@ int EXPORT main(int argc, char const* argv[]) 
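
The truncate_files() entry point introduced above is what repairs over-sized files: it walks the file_storage and shrinks any file on disk that is larger than the torrent metadata says it should be, leaving missing or correctly sized files untouched. A minimal usage sketch, mirroring the new test_truncate.cpp further down (the trim_oversized() wrapper is illustrative, not part of the patch):

#include "libtorrent/truncate.hpp"
#include "libtorrent/file_storage.hpp"
#include "libtorrent/error_code.hpp"
#include <cstdio>
#include <string>

void trim_oversized(lt::file_storage const& fs, std::string const& save_path)
{
	lt::storage_error err;
	lt::truncate_files(fs, save_path, err);
	if (err.ec)
		std::fprintf(stderr, "truncate failed (file %d): %s\n"
			, static_cast<int>(err.file()), err.ec.message().c_str());
}
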
setbuf(stdout, nullptr); setbuf(stderr, nullptr); - _g_test_idx = i; + ::unit_test::g_test_idx = i; current_test = &t; std::printf("cwd: %s\n", unit_dir.c_str()); @@ -499,7 +503,7 @@ int EXPORT main(int argc, char const* argv[]) std::srand(unsigned(std::hash{}(executable)) + unsigned(i)); lt::aux::random_engine().seed(0x82daf973); - _g_test_failures = 0; + ::unit_test::g_test_failures = 0; (*t.fun)(); #ifndef BOOST_NO_EXCEPTIONS } @@ -510,28 +514,28 @@ int EXPORT main(int argc, char const* argv[]) , e.code().value() , e.code().category().name() , e.code().message().c_str()); - report_failure(buf, __FILE__, __LINE__); + unit_test::report_failure(buf, __FILE__, __LINE__); } catch (std::exception const& e) { char buf[200]; std::snprintf(buf, sizeof(buf), "TEST_ERROR: Terminated with exception: \"%s\"", e.what()); - report_failure(buf, __FILE__, __LINE__); + unit_test::report_failure(buf, __FILE__, __LINE__); } catch (...) { - report_failure("TEST_ERROR: Terminated with unknown exception", __FILE__, __LINE__); + unit_test::report_failure("TEST_ERROR: Terminated with unknown exception", __FILE__, __LINE__); } #endif if (!tests_to_run.empty()) tests_to_run.erase(t.name); - if (_g_test_failures > 0) + if (::unit_test::g_test_failures > 0) { output_test_log_to_terminal(); } - t.num_failures = _g_test_failures; + t.num_failures = ::unit_test::g_test_failures; t.run = true; ++num_run; @@ -570,6 +574,6 @@ int EXPORT main(int argc, char const* argv[]) if (redirect_stdout) fflush(stdout); if (redirect_stderr) fflush(stderr); - return print_failures() ? 333 : 0; + return unit_test::print_failures() ? 333 : 0; } diff --git a/test/setup_transfer.cpp b/test/setup_transfer.cpp index ccf3d760d48..6c16a5319d9 100644 --- a/test/setup_transfer.cpp +++ b/test/setup_transfer.cpp @@ -699,7 +699,7 @@ std::vector get_python() int find_available_port() { - int port = 2000 + (std::int64_t(::getpid()) + _g_test_idx + std::rand()) % 60000; + int port = 2000 + (std::int64_t(::getpid()) + ::unit_test::g_test_idx + std::rand()) % 60000; error_code ec; io_context ios; diff --git a/test/setup_transfer.hpp b/test/setup_transfer.hpp index 9e83fe9724e..6be5863c6cd 100644 --- a/test/setup_transfer.hpp +++ b/test/setup_transfer.hpp @@ -24,13 +24,9 @@ see LICENSE file. EXPORT std::shared_ptr generate_torrent(bool with_files = false, bool with_hashes = false); -EXPORT int print_failures(); - EXPORT int load_file(std::string const& filename, std::vector& v , lt::error_code& ec, int limit = 8000000); -EXPORT void report_failure(char const* err, char const* file, int line); - EXPORT void init_rand_address(); EXPORT lt::address rand_v4(); EXPORT lt::address rand_v6(); diff --git a/test/test.cpp b/test/test.cpp index 418c9dbd18b..7e757661559 100644 --- a/test/test.cpp +++ b/test/test.cpp @@ -12,16 +12,20 @@ see LICENSE file. 
#include "test.hpp" -unit_test_t _g_unit_tests[1024]; -int _g_num_unit_tests = 0; -int _g_test_failures = 0; // flushed at start of every unit -int _g_test_idx = 0; +namespace unit_test { -static std::vector failure_strings; +unit_test_t g_unit_tests[1024]; +int g_num_unit_tests = 0; +int g_test_failures = 0; // flushed at start of every unit +int g_test_idx = 0; + +namespace { +std::vector failure_strings; +} int test_counter() { - return _g_test_idx; + return g_test_idx; } void report_failure(char const* err, char const* file, int line) @@ -30,37 +34,37 @@ void report_failure(char const* err, char const* file, int line) std::snprintf(buf, sizeof(buf), "\x1b[41m***** %s:%d \"%s\" *****\x1b[0m\n", file, line, err); std::printf("\n%s\n", buf); failure_strings.push_back(buf); - ++_g_test_failures; + ++g_test_failures; } int print_failures() { int longest_name = 0; - for (int i = 0; i < _g_num_unit_tests; ++i) + for (int i = 0; i < g_num_unit_tests; ++i) { - int len = int(strlen(_g_unit_tests[i].name)); + int len = int(strlen(g_unit_tests[i].name)); if (len > longest_name) longest_name = len; } std::printf("\n\n"); int total_num_failures = 0; - for (int i = 0; i < _g_num_unit_tests; ++i) + for (int i = 0; i < g_num_unit_tests; ++i) { - if (_g_unit_tests[i].run == false) continue; + if (g_unit_tests[i].run == false) continue; - if (_g_unit_tests[i].num_failures == 0) + if (g_unit_tests[i].num_failures == 0) { std::printf("\x1b[32m[%-*s] ***PASS***\n" - , longest_name, _g_unit_tests[i].name); + , longest_name, g_unit_tests[i].name); } else { - total_num_failures += _g_unit_tests[i].num_failures; + total_num_failures += g_unit_tests[i].num_failures; std::printf("\x1b[31m[%-*s] %d FAILURES\n" , longest_name - , _g_unit_tests[i].name - , _g_unit_tests[i].num_failures); + , g_unit_tests[i].name + , g_unit_tests[i].num_failures); } } @@ -72,3 +76,4 @@ int print_failures() return total_num_failures; } +} // unit_test diff --git a/test/test.hpp b/test/test.hpp index 6bdc776502a..1565302aea3 100644 --- a/test/test.hpp +++ b/test/test.hpp @@ -39,6 +39,8 @@ see LICENSE file. #define EXPORT #endif +namespace unit_test { + void EXPORT report_failure(char const* err, char const* file, int line); int EXPORT print_failures(); int EXPORT test_counter(); @@ -55,28 +57,30 @@ struct unit_test_t FILE* output; }; -extern unit_test_t EXPORT _g_unit_tests[1024]; -extern int EXPORT _g_num_unit_tests; -extern int EXPORT _g_test_failures; -extern int _g_test_idx; +extern unit_test_t EXPORT g_unit_tests[1024]; +extern int EXPORT g_num_unit_tests; +extern int EXPORT g_test_failures; +extern int g_test_idx; + +} // unit_test #define TORRENT_TEST(test_name) \ static void BOOST_PP_CAT(unit_test_, test_name)(); \ static struct BOOST_PP_CAT(register_class_, test_name) { \ BOOST_PP_CAT(register_class_, test_name) () { \ - unit_test_t& t = _g_unit_tests[_g_num_unit_tests]; \ + auto& t = ::unit_test::g_unit_tests[::unit_test::g_num_unit_tests]; \ t.fun = &BOOST_PP_CAT(unit_test_, test_name); \ t.name = __FILE__ "." 
#test_name; \ t.num_failures = 0; \ t.run = false; \ t.output = nullptr; \ - _g_num_unit_tests++; \ + ::unit_test::g_num_unit_tests++; \ } \ - } BOOST_PP_CAT(_static_registrar_, test_name); \ + } BOOST_PP_CAT(g_static_registrar_for, test_name); \ static void BOOST_PP_CAT(unit_test_, test_name)() #define TEST_REPORT_AUX(x, line, file) \ - report_failure(x, line, file) + unit_test::report_failure(x, line, file) #ifdef BOOST_NO_EXCEPTIONS #define TEST_CHECK(x) \ @@ -85,15 +89,15 @@ extern int _g_test_idx; } while (false) #define TEST_EQUAL(x, y) \ do if ((x) != (y)) { \ - std::stringstream s__; \ - s__ << "TEST_ERROR: equal check failed:\n" #x ": " << (x) << "\nexpected: " << (y); \ - TEST_REPORT_AUX(s__.str().c_str(), __FILE__, __LINE__); \ + std::stringstream _s_; \ + _s_ << "TEST_ERROR: equal check failed:\n" #x ": " << (x) << "\nexpected: " << (y); \ + TEST_REPORT_AUX(_s_.str().c_str(), __FILE__, __LINE__); \ } while (false) #define TEST_NE(x, y) \ do if ((x) == (y)) { \ - std::stringstream s__; \ - s__ << "TEST_ERROR: not equal check failed:\n" #x ": " << (x) << "\nexpected not equal to: " << (y); \ - TEST_REPORT_AUX(s__.str().c_str(), __FILE__, __LINE__); \ + std::stringstream _s_; \ + _s_ << "TEST_ERROR: not equal check failed:\n" #x ": " << (x) << "\nexpected not equal to: " << (y); \ + TEST_REPORT_AUX(_s_.str().c_str(), __FILE__, __LINE__); \ } while (false) #else #define TEST_CHECK(x) \ @@ -102,9 +106,9 @@ extern int _g_test_idx; if (!(x)) \ TEST_REPORT_AUX("TEST_ERROR: check failed: \"" #x "\"", __FILE__, __LINE__); \ } \ - catch (std::exception const& e__) \ + catch (std::exception const& _e) \ { \ - TEST_ERROR("TEST_ERROR: Exception thrown: " #x " :" + std::string(e__.what())); \ + TEST_ERROR("TEST_ERROR: Exception thrown: " #x " :" + std::string(_e.what())); \ } \ catch (...) \ { \ @@ -114,14 +118,14 @@ extern int _g_test_idx; #define TEST_EQUAL(x, y) \ do try { \ if ((x) != (y)) { \ - std::stringstream s__; \ - s__ << "TEST_ERROR: " #x ": " << (x) << " expected: " << (y); \ - TEST_REPORT_AUX(s__.str().c_str(), __FILE__, __LINE__); \ + std::stringstream _s_; \ + _s_ << "TEST_ERROR: " #x ": " << (x) << " expected: " << (y); \ + TEST_REPORT_AUX(_s_.str().c_str(), __FILE__, __LINE__); \ } \ } \ - catch (std::exception const& e__) \ + catch (std::exception const& _e) \ { \ - TEST_ERROR("TEST_ERROR: Exception thrown: " #x " :" + std::string(e__.what())); \ + TEST_ERROR("TEST_ERROR: Exception thrown: " #x " :" + std::string(_e.what())); \ } \ catch (...) \ { \ @@ -130,14 +134,14 @@ extern int _g_test_idx; #define TEST_NE(x, y) \ do try { \ if ((x) == (y)) { \ - std::stringstream s__; \ - s__ << "TEST_ERROR: " #x ": " << (x) << " expected not equal to: " << (y); \ - TEST_REPORT_AUX(s__.str().c_str(), __FILE__, __LINE__); \ + std::stringstream _s_; \ + _s_ << "TEST_ERROR: " #x ": " << (x) << " expected not equal to: " << (y); \ + TEST_REPORT_AUX(_s_.str().c_str(), __FILE__, __LINE__); \ } \ } \ - catch (std::exception const& e__) \ + catch (std::exception const& _e) \ { \ - TEST_ERROR("TEST_ERROR: Exception thrown: " #x " :" + std::string(e__.what())); \ + TEST_ERROR("TEST_ERROR: Exception thrown: " #x " :" + std::string(_e.what())); \ } \ catch (...) 
\ { \ diff --git a/test/test_alert_types.cpp b/test/test_alert_types.cpp index 7a5a22d5a1c..9e6be094c85 100644 --- a/test/test_alert_types.cpp +++ b/test/test_alert_types.cpp @@ -159,10 +159,11 @@ TORRENT_TEST(alerts_types) TEST_ALERT_TYPE(alerts_dropped_alert, 95, alert_priority::meta, alert_category::error); TEST_ALERT_TYPE(socks5_alert, 96, alert_priority::normal, alert_category::error); TEST_ALERT_TYPE(file_prio_alert, 97, alert_priority::normal, alert_category::storage); + TEST_ALERT_TYPE(oversized_file_alert, 98, alert_priority::normal, alert_category::storage); #undef TEST_ALERT_TYPE - TEST_EQUAL(num_alert_types, 98); + TEST_EQUAL(num_alert_types, 99); TEST_EQUAL(num_alert_types, count_alert_types); } diff --git a/test/test_copy_file.cpp b/test/test_copy_file.cpp new file mode 100644 index 00000000000..6b403a470e5 --- /dev/null +++ b/test/test_copy_file.cpp @@ -0,0 +1,232 @@ +/* + +Copyright (c) 2022, Arvid Norberg +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
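
oversized_file_alert registers as alert type 98 in the storage category. A hypothetical client-side sketch of reacting to it, assuming (as the construction with get_handle() in torrent.cpp suggests) that it derives from torrent_alert; this handler is not part of the patch:

#include <libtorrent/session.hpp>
#include <libtorrent/alert_types.hpp>
#include <cstdio>
#include <vector>

void drain_alerts(lt::session& ses)
{
	std::vector<lt::alert*> alerts;
	ses.pop_alerts(&alerts);
	for (lt::alert const* a : alerts)
	{
		if (auto const* of = lt::alert_cast<lt::oversized_file_alert>(a))
		{
			// some files on disk are larger than the torrent expects;
			// lt::truncate_files() can shrink them before a forced recheck
			std::printf("%s\n", of->message().c_str());
		}
	}
}
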
+ +*/ + +#include "libtorrent/aux_/path.hpp" +#include "libtorrent/error_code.hpp" +#include "libtorrent/aux_/mmap.hpp" +#include "libtorrent/aux_/open_mode.hpp" +#include "test.hpp" + +#include +#include + +#ifndef TORRENT_WINDOWS +#include +#endif + +#ifdef TORRENT_LINUX +#include +#include +#endif + +namespace { + +void write_file(std::string const& filename, int size) +{ + std::vector v; + v.resize(std::size_t(size)); + for (int i = 0; i < size; ++i) + v[std::size_t(i)] = char(i & 255); + + std::ofstream(filename.c_str()).write(v.data(), std::streamsize(v.size())); +} + +bool compare_files(std::string const& file1, std::string const& file2) +{ + lt::error_code ec; + lt::file_status st1; + lt::file_status st2; + lt::stat_file(file1, &st1, ec); + TEST_CHECK(!ec); + lt::stat_file(file2, &st2, ec); + TEST_CHECK(!ec); + if (st1.file_size != st2.file_size) + return false; + + std::ifstream f1(file1.c_str()); + std::ifstream f2(file2.c_str()); + using it = std::istream_iterator; + return std::equal(it(f1), it{}, it(f2)); +} + +#if defined TORRENT_WINDOWS +bool fs_supports_sparse_files() +{ +#ifdef TORRENT_WINRT + HANDLE test = ::CreateFile2(L"test" + , GENERIC_WRITE + , FILE_SHARE_READ + , OPEN_ALWAYS + , nullptr); +#else + HANDLE test = ::CreateFileA("test" + , GENERIC_WRITE + , FILE_SHARE_READ + , nullptr + , OPEN_ALWAYS + , FILE_FLAG_SEQUENTIAL_SCAN + , nullptr); +#endif + TEST_CHECK(test != INVALID_HANDLE_VALUE); + DWORD fs_flags = 0; + wchar_t fs_name[50]; + TEST_CHECK(::GetVolumeInformationByHandleW(test, nullptr, 0, nullptr, nullptr + , &fs_flags, fs_name, sizeof(fs_name)) != 0); + ::CloseHandle(test); + printf("filesystem: %S\n", fs_name); + return (fs_flags & FILE_SUPPORTS_SPARSE_FILES) != 0; +} + +#else + +bool fs_supports_sparse_files() +{ + int test = ::open("test", O_RDWR | O_CREAT, 0755); + TEST_CHECK(test >= 0); + struct statfs st{}; + TEST_CHECK(fstatfs(test, &st) == 0); + ::close(test); +#ifdef TORRENT_LINUX + static long const ufs = 0x00011954; + static const std::set sparse_filesystems{ + EXT4_SUPER_MAGIC, EXT3_SUPER_MAGIC, XFS_SUPER_MAGIC, BTRFS_SUPER_MAGIC + , ufs, REISERFS_SUPER_MAGIC + }; + printf("filesystem: %ld\n", st.f_type); + return sparse_filesystems.count(st.f_type); +#else + printf("filesystem: (%d) %s\n", st.f_type, st.f_fstypename); + static const std::set sparse_filesystems{ + "ufs", "zfs", "ext4", "xfs", "apfs", "btrfs"}; + return sparse_filesystems.count(st.f_fstypename); +#endif +} + +#endif +} + +TORRENT_TEST(basic) +{ + write_file("basic-1", 10); + lt::error_code ec; + lt::copy_file("basic-1", "basic-1.copy", ec); + TEST_CHECK(!ec); + TEST_CHECK(compare_files("basic-1", "basic-1.copy")); + + write_file("basic-2", 1000000); + lt::copy_file("basic-2", "basic-2.copy", ec); + TEST_CHECK(!ec); + TEST_CHECK(compare_files("basic-2", "basic-2.copy")); +} + +#if TORRENT_HAVE_MMAP || TORRENT_HAVE_MAP_VIEW_OF_FILE +TORRENT_TEST(sparse_file) +{ + using lt::aux::file_handle; + using lt::aux::file_mapping; + using lt::aux::file_view; + namespace open_mode = lt::aux::open_mode; + + { + +#if TORRENT_HAVE_MAP_VIEW_OF_FILE + auto open_unmap_lock = std::make_shared(); +#endif + file_handle f("sparse-1", 50'000'000 + , open_mode::write | open_mode::truncate | open_mode::sparse); + auto map = std::make_shared(std::move(f), lt::aux::open_mode::write, 50'000'000 +#if TORRENT_HAVE_MAP_VIEW_OF_FILE + , open_unmap_lock +#endif + ); + file_view view = map->view(); + auto range = view.range(); + TEST_CHECK(range.size() == 50'000'000); + + range[0] = 1; + range[49'999'999] = 1; + } 
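
This block maps a 50 MB file and only touches the first and last byte, so on a filesystem with sparse-file support the file occupies a handful of blocks; the test then verifies that lt::copy_file() keeps it that way. The new src/copy_file.cpp itself is not shown in this section, so purely as an illustrative assumption, here is one common way such a copy can preserve holes on Linux, using lseek() with SEEK_DATA/SEEK_HOLE to copy only the allocated extents:

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <vector>

// sketch only: copy src to dst, recreating holes instead of filling them
bool sparse_copy(char const* src, char const* dst)
{
	int const in = ::open(src, O_RDONLY);
	if (in < 0) return false;
	struct stat st{};
	if (::fstat(in, &st) != 0) { ::close(in); return false; }
	int const out = ::open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (out < 0) { ::close(in); return false; }

	// give the destination its full logical size up front; ranges that are
	// never written stay as holes
	bool ok = ::ftruncate(out, st.st_size) == 0;
	std::vector<char> buf(1 << 16);
	off_t pos = 0;
	while (ok)
	{
		off_t const data = ::lseek(in, pos, SEEK_DATA);
		if (data < 0) break; // no more data extents (ENXIO at end of file)
		off_t const hole = ::lseek(in, data, SEEK_HOLE);
		if (hole < 0) { ok = false; break; }
		for (off_t off = data; ok && off < hole; )
		{
			size_t const want = size_t(std::min<off_t>(off_t(buf.size()), hole - off));
			ssize_t const n = ::pread(in, buf.data(), want, off);
			if (n <= 0) { ok = false; break; }
			ok = ::pwrite(out, buf.data(), size_t(n), off) == n;
			off += n;
		}
		pos = hole;
	}
	::close(in);
	::close(out);
	return ok;
}
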
+ + // Find out if the filesystem we're running the test on supports sparse + // files. If not, we don't expect any of the files to be sparse + bool const supports_sparse_files = fs_supports_sparse_files(); + + // make sure "sparse-1" is actually sparse +#ifdef TORRENT_WINDOWS + DWORD high; + std::int64_t const original_size = ::GetCompressedFileSizeA("sparse-1", &high); + TEST_CHECK(original_size != INVALID_FILE_SIZE); + TEST_CHECK(high == 0); +#else + struct stat st; + TEST_CHECK(::stat("sparse-1", &st) == 0); + std::int64_t const original_size = st.st_blocks * 512; +#endif + printf("original_size: %d\n", int(original_size)); + if (supports_sparse_files) + { + TEST_CHECK(original_size < 500'000); + } + else + { + TEST_CHECK(original_size >= 50'000'000); + } + + lt::error_code ec; + lt::copy_file("sparse-1", "sparse-1.copy", ec); + TEST_CHECK(!ec); + + // make sure the copy is sparse +#ifdef TORRENT_WINDOWS + WIN32_FILE_ATTRIBUTE_DATA out_stat; + TEST_CHECK(::GetFileAttributesExA("sparse-1.copy", GetFileExInfoStandard, &out_stat)); + if (supports_sparse_files) + { + TEST_CHECK(out_stat.dwFileAttributes & FILE_ATTRIBUTE_SPARSE_FILE); + } + else + { + TEST_CHECK((out_stat.dwFileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) == 0); + } + + TEST_EQUAL(::GetCompressedFileSizeA("sparse-1.copy", &high), original_size); + TEST_CHECK(high == 0); +#else + TEST_CHECK(::stat("sparse-1.copy", &st) == 0); + printf("copy_size: %d\n", int(st.st_blocks) * 512); + TEST_EQUAL(st.st_blocks * 512, original_size); +#endif + + TEST_CHECK(compare_files("sparse-1", "sparse-1.copy")); +} +#endif + diff --git a/test/test_create_torrent.cpp b/test/test_create_torrent.cpp index 157c757af19..0f5afec2ece 100644 --- a/test/test_create_torrent.cpp +++ b/test/test_create_torrent.cpp @@ -180,6 +180,11 @@ TORRENT_TEST(create_torrent_round_trip_hybrid) test_round_trip_torrent("v2_hybrid.torrent"); } +TORRENT_TEST(create_torrent_round_trip_empty_file) +{ + test_round_trip_torrent("v2_empty_file.torrent"); +} + // check that attempting to create a torrent containing both // a file and directory with the same name is not allowed TORRENT_TEST(v2_path_conflict) diff --git a/test/test_file.cpp b/test/test_file.cpp index 9b1b5dce740..8f5875763fa 100644 --- a/test/test_file.cpp +++ b/test/test_file.cpp @@ -121,20 +121,8 @@ TORRENT_TEST(directory) TEST_CHECK(files.count(".") == 1); files.clear(); - recursive_copy("file_test_dir", "file_test_dir2", ec); - - for (aux::directory i("file_test_dir2", ec); !i.done(); i.next(ec)) - { - std::string f = i.file(); - TEST_CHECK(files.count(f) == 0); - files.insert(f); - std::printf(" %s\n", f.c_str()); - } - remove_all("file_test_dir", ec); if (ec) std::printf("remove_all: %s\n", ec.message().c_str()); - remove_all("file_test_dir2", ec); - if (ec) std::printf("remove_all: %s\n", ec.message().c_str()); } // test path functions diff --git a/test/test_storage.cpp b/test/test_storage.cpp index efe1f52bca2..75bef3b2929 100644 --- a/test/test_storage.cpp +++ b/test/test_storage.cpp @@ -35,6 +35,7 @@ see LICENSE file. 
#include "libtorrent/aux_/random.hpp" #include "libtorrent/mmap_disk_io.hpp" #include "libtorrent/posix_disk_io.hpp" +#include "libtorrent/flags.hpp" #include #include // for bind @@ -74,24 +75,37 @@ void delete_dirs(std::string path) TEST_CHECK(!exists(path)); } -void on_check_resume_data(lt::status_t const status, storage_error const& error, bool* done) +void on_check_resume_data(lt::status_t const status, storage_error const& error, bool* done, bool* oversized) { std::cout << time_now_string() << " on_check_resume_data ret: " << static_cast(status); - switch (status) + if ((status & lt::status_t::oversized_file) != status_t{}) + { + std::cout << " oversized file(s) - "; + *oversized = true; + } + else + { + *oversized = false; + } + + switch (status & ~lt::status_t::mask) { case lt::status_t::no_error: - std::cout << time_now_string() << " success" << std::endl; + std::cout << " success" << std::endl; break; case lt::status_t::fatal_disk_error: - std::cout << time_now_string() << " disk error: " << error.ec.message() + std::cout << " disk error: " << error.ec.message() << " file: " << error.file() << std::endl; break; case lt::status_t::need_full_check: - std::cout << time_now_string() << " need full check" << std::endl; + std::cout << " need full check" << std::endl; break; case lt::status_t::file_exist: - std::cout << time_now_string() << " file exist" << std::endl; + std::cout << " file exist" << std::endl; + break; + case lt::status_t::mask: + case lt::status_t::oversized_file: break; } std::cout << std::endl; @@ -218,7 +232,7 @@ int writev(std::shared_ptr s , aux::open_mode_t const mode , storage_error& error) { - return s->writev(sett, bufs, piece, offset, mode, error); + return s->writev(sett, bufs, piece, offset, mode, disk_job_flags_t{}, error); } int readv(std::shared_ptr s @@ -226,10 +240,10 @@ int readv(std::shared_ptr s , span bufs , piece_index_t piece , int const offset - , aux::open_mode_t flags + , aux::open_mode_t mode , storage_error& ec) { - return s->readv(sett, bufs, piece, offset, flags, ec); + return s->readv(sett, bufs, piece, offset, mode, disk_job_flags_t{}, ec); } void release_files(std::shared_ptr s, storage_error& ec) @@ -481,9 +495,17 @@ void test_rename(std::string const& test_path) TEST_EQUAL(s->files().file_path(0_file), "new_filename"); } -void test_check_files(std::string const& test_path - , lt::storage_mode_t storage_mode) +using lt::operator""_bit; +using check_files_flag_t = lt::flags::bitfield_flag; + +constexpr check_files_flag_t sparse = 0_bit; +constexpr check_files_flag_t test_oversized = 1_bit; +constexpr check_files_flag_t zero_prio = 2_bit; + +void test_check_files(check_files_flag_t const flags + , lt::disk_io_constructor_type const disk_constructor) { + std::string const test_path = current_working_directory(); std::shared_ptr info; error_code ec; @@ -507,10 +529,13 @@ void test_check_files(std::string const& test_path create_directory(combine_path(test_path, "temp_storage"), ec); if (ec) std::cout << "create_directory: " << ec.message() << std::endl; + if (flags & test_oversized) + piece2.push_back(0x42); + ofstream(combine_path(test_path, combine_path("temp_storage", "test1.tmp")).c_str()) - .write(piece0.data(), piece_size_check); + .write(piece0.data(), std::streamsize(piece0.size())); ofstream(combine_path(test_path, combine_path("temp_storage", "test3.tmp")).c_str()) - .write(piece2.data(), piece_size_check); + .write(piece2.data(), std::streamsize(piece2.size())); std::vector buf; bencode(std::back_inserter(buf), t.generate()); @@ 
-522,16 +547,19 @@ void test_check_files(std::string const& test_path aux::session_settings sett; sett.set_int(settings_pack::aio_threads, 1); - std::unique_ptr io = default_disk_io_constructor(ios, sett, cnt); + std::unique_ptr io = disk_constructor(ios, sett, cnt); + + aux::vector priorities; + + if (flags & zero_prio) + priorities.resize(std::size_t(info->num_files()), download_priority_t{}); - aux::vector priorities( - std::size_t(info->num_files()), download_priority_t{}); sha1_hash info_hash; storage_params p{ fs, nullptr, test_path, - storage_mode, + (flags & sparse) ? storage_mode_sparse : storage_mode_allocate, priorities, info_hash }; @@ -539,14 +567,17 @@ void test_check_files(std::string const& test_path auto st = io->new_torrent(std::move(p), std::shared_ptr()); bool done = false; + bool oversized = false; add_torrent_params frd; aux::vector links; io->async_check_files(st, &frd, links - , std::bind(&on_check_resume_data, _1, _2, &done)); + , std::bind(&on_check_resume_data, _1, _2, &done, &oversized)); io->submit_jobs(); ios.restart(); run_until(ios, done); + TEST_EQUAL(oversized, bool(flags & test_oversized)); + for (auto const i : info->piece_range()) { done = false; @@ -618,14 +649,47 @@ void run_test() delete_dirs("temp_storage"); } -TORRENT_TEST(check_files_sparse) +#if TORRENT_HAVE_MMAP +TORRENT_TEST(check_files_sparse_mmap) { - test_check_files(current_working_directory(), storage_mode_sparse); + test_check_files(sparse | zero_prio, lt::mmap_disk_io_constructor); } -TORRENT_TEST(check_files_allocate) +TORRENT_TEST(check_files_oversized_mmap_zero_prio) +{ + test_check_files(sparse | zero_prio | test_oversized, lt::mmap_disk_io_constructor); +} + +TORRENT_TEST(check_files_oversized_mmap) +{ + test_check_files(sparse | test_oversized, lt::mmap_disk_io_constructor); +} + + +TORRENT_TEST(check_files_allocate_mmap) +{ + test_check_files(zero_prio, lt::mmap_disk_io_constructor); +} +#endif +TORRENT_TEST(check_files_sparse_posix) +{ + test_check_files(sparse | zero_prio, lt::posix_disk_io_constructor); +} + +TORRENT_TEST(check_files_oversized_zero_prio_posix) +{ + test_check_files(sparse | zero_prio | test_oversized, lt::posix_disk_io_constructor); +} + +TORRENT_TEST(check_files_oversized_posix) +{ + test_check_files(sparse | test_oversized, lt::posix_disk_io_constructor); +} + + +TORRENT_TEST(check_files_allocate_posix) { - test_check_files(current_working_directory(), storage_mode_allocate); + test_check_files(zero_prio, lt::posix_disk_io_constructor); } #if TORRENT_HAVE_MMAP @@ -1492,7 +1556,7 @@ TORRENT_TEST(dont_move_intermingled_files) iovec_t b = {&buf[0], 4}; storage_error se; - s->writev(set, b, 2_piece, 0, aux::open_mode::write, se); + s->writev(set, b, 2_piece, 0, aux::open_mode::write, disk_job_flags_t{}, se); error_code ec; create_directory(combine_path(save_path, combine_path("temp_storage" @@ -1580,7 +1644,7 @@ void test_unaligned_read(lt::disk_io_constructor_type constructor, Fun fun) fun(disk_io.get(), t, ioc, outstanding); - disk_io->remove_torrent(t); + t.reset(); disk_io->abort(true); } diff --git a/test/test_torrents/v2_empty_file.torrent b/test/test_torrents/v2_empty_file.torrent new file mode 100644 index 00000000000..6c77b9a6981 Binary files /dev/null and b/test/test_torrents/v2_empty_file.torrent differ diff --git a/test/test_truncate.cpp b/test/test_truncate.cpp new file mode 100644 index 00000000000..b3c75e6e0fd --- /dev/null +++ b/test/test_truncate.cpp @@ -0,0 +1,107 @@ +/* + +Copyright (c) 2022, Arvid Norberg +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the distribution. + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +*/ + +#include +#include + +#include "test.hpp" +#include "libtorrent/aux_/path.hpp" +#include "libtorrent/truncate.hpp" +#include "libtorrent/file_storage.hpp" +#include "libtorrent/error_code.hpp" + +namespace { + +void create_file(std::string const& name, int size) +{ + lt::error_code ec; + lt::create_directories(lt::parent_path(name), ec); + TEST_CHECK(!ec); + std::ofstream f(name.c_str()); + std::vector buf(static_cast(size)); + f.write(buf.data(), std::streamsize(buf.size())); +} + +std::int64_t file_size(std::string const& name) +{ + lt::file_status st; + lt::error_code ec; + lt::stat_file(name, &st, ec); + std::cerr << name << ": " << ec.message() << '\n'; + TEST_CHECK(!ec); + return st.file_size; +} +} + +TORRENT_TEST(truncate_small_files) +{ + using lt::combine_path; + + lt::file_storage fs; + fs.add_file(combine_path("test", "a"), 100); + fs.add_file(combine_path("test", "b"), 900); + fs.add_file(combine_path("test", "c"), 10); + + create_file(combine_path("test", "a"), 99); + create_file(combine_path("test", "b"), 899); + create_file(combine_path("test", "c"), 9); + + lt::storage_error err; + lt::truncate_files(fs, ".", err); + TEST_CHECK(!err.ec); + + TEST_EQUAL(file_size(combine_path("test", "a")), 99); + TEST_EQUAL(file_size(combine_path("test", "b")), 899); + TEST_EQUAL(file_size(combine_path("test", "c")), 9); +} + +TORRENT_TEST(truncate_large_files) +{ + using lt::combine_path; + + lt::file_storage fs; + fs.add_file(combine_path("test", "a"), 100); + fs.add_file(combine_path("test", "b"), 900); + fs.add_file(combine_path("test", "c"), 10); + + create_file(combine_path("test", "a"), 101); + create_file(combine_path("test", "b"), 901); + create_file(combine_path("test", "c"), 11); + + lt::storage_error err; + lt::truncate_files(fs, ".", err); + TEST_CHECK(!err.ec); + + TEST_EQUAL(file_size(combine_path("test", "a")), 100); + TEST_EQUAL(file_size(combine_path("test", "b")), 900); + TEST_EQUAL(file_size(combine_path("test", "c")), 10); +} diff --git a/tools/Jamfile b/tools/Jamfile index ae5d9f9c462..2c93e26a018 100644 --- a/tools/Jamfile +++ 
b/tools/Jamfile @@ -29,10 +29,10 @@ project tools : requirements multi # disable warning C4275: non DLL-interface classkey 'identifier' used as base for DLL-interface classkey 'identifier' - msvc:/wd4275 + msvc:/wd4275 # C4268: 'identifier' : 'const' static/global data initialized # with compiler generated default constructor fills the object with zeros - msvc:/wd4268 + msvc:/wd4268 @link_libtorrent : default-build static diff --git a/tools/linux_vmstat.py b/tools/linux_vmstat.py deleted file mode 100644 index 7b502e9e378..00000000000 --- a/tools/linux_vmstat.py +++ /dev/null @@ -1,123 +0,0 @@ -# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 - -import os -from time import time -from typing import Dict -from typing import List -from typing import Set - - -def capture_sample(pid: int, start_time: int, output: Dict[str, List[int]]) -> None: - try: - with open(f"/proc/{pid}/smaps_rollup") as f: - sample = f.read() - timestamp = int((time() - start_time) * 1000) - except Exception: - return - - if "time" not in output: - output["time"] = [timestamp] - else: - output["time"].append(timestamp) - - for line in sample.split("\n"): - if "[rollup]" in line: - continue - if line.strip() == "": - continue - - key, value = line.split(":") - val = int(value.split()[0].strip()) - key = key.strip() - - if key not in output: - output[key] = [val] - else: - output[key].append(val) - - -# example output: -# 8affffff000-7fffba926000 ---p 00000000 00:00 0 [rollup] -# Rss: 76932 kB -# Pss: 17508 kB -# Pss_Anon: 11376 kB -# Pss_File: 6101 kB -# Pss_Shmem: 30 kB -# Shared_Clean: 65380 kB -# Shared_Dirty: 88 kB -# Private_Clean: 80 kB -# Private_Dirty: 11384 kB -# Referenced: 76932 kB -# Anonymous: 11376 kB -# LazyFree: 0 kB -# AnonHugePages: 0 kB -# ShmemPmdMapped: 0 kB -# FilePmdMapped: 0 kB -# Shared_Hugetlb: 0 kB -# Private_Hugetlb: 0 kB -# Swap: 0 kB -# SwapPss: 0 kB -# Locked: 0 kB - - -def print_output_to_file(out: Dict[str, List[int]], filename: str) -> List[str]: - - if out == {}: - return [] - - with open(filename, "w+") as stats_output: - non_zero_keys: Set[str] = set() - non_zero_keys.add("time") - keys = out.keys() - for key in keys: - stats_output.write(f"{key} ") - stats_output.write("\n") - idx = 0 - while len(out["time"]) > idx: - for key in keys: - stats_output.write(f"{out[key][idx]} ") - if out[key][idx] != 0: - non_zero_keys.add(key) - stats_output.write("\n") - idx += 1 - return [k if k in non_zero_keys else "" for k in keys] - - -def plot_output(filename: str, keys: List[str]) -> None: - if "time" not in keys: - return - - output_dir, in_file = os.path.split(filename) - gnuplot_file = f"{output_dir}/plot_{in_file}.gnuplot" - with open(gnuplot_file, "w+") as f: - f.write( - f"""set term png size 1200,700 -set output "{in_file}.png" -set format y '%.0f MB' -set title "libtorrent memory usage" -set ylabel "Memory Size" -set xlabel "time (s)" -set xrange [0:*] -set yrange [2:*] -set logscale y 2 -set grid -plot """ - ) - - plot_string = "" - tidx = keys.index("time") + 1 - idx = 0 - for p in keys: - idx += 1 - if p == "time" or p == "": - continue - # escape underscores, since gnuplot interprets those as markup - p = p.replace("_", "\\\\_") - plot_string += ( - f'"{in_file}" using (${tidx}/1000):(${idx}/1024) ' - + f'title "{p}" with steps, \\\n' - ) - plot_string = plot_string[0:-4] - f.write(plot_string) - - os.system(f"(cd {output_dir}; gnuplot {os.path.split(gnuplot_file)[1]})") diff --git a/tools/run_benchmark.py b/tools/run_benchmark.py index 82f2aa02266..2f8a1c5cd8a 100755 --- 
a/tools/run_benchmark.py +++ b/tools/run_benchmark.py @@ -7,12 +7,22 @@ import shutil import subprocess import sys + +from vmstat import capture_sample +from vmstat import plot_output +from vmstat import print_output_to_file + import platform -from linux_vmstat import capture_sample -from linux_vmstat import plot_output -from linux_vmstat import print_output_to_file +exe = "" + +if platform.system() == "Windows": + exe = ".exe" +def reset_download(): + rm_file_or_dir('.ses_state') + rm_file_or_dir('.resume') + rm_file_or_dir('cpu_benchmark') def main(): args = parse_args() @@ -29,20 +39,20 @@ def main(): print('ERROR: build failed: %d' % ret) sys.exit(1) - rm_file_or_dir('.ses_state') - rm_file_or_dir('.resume') - rm_file_or_dir('cpu_benchmark') + reset_download() if not os.path.exists('cpu_benchmark.torrent'): - ret = os.system('../examples/connection_tester gen-torrent -s 20000 -n 15 -t cpu_benchmark.torrent') + ret = subprocess.check_call([f'../examples/connection_tester{exe}', 'gen-torrent', '-s', '100000', '-n', '15', '-t', 'cpu_benchmark.torrent']) if ret != 0: print('ERROR: connection_tester failed: %d' % ret) sys.exit(1) rm_file_or_dir('t') - run_test('download', 'upload', '-1', args.download_peers) - run_test('upload', 'download', '-G -e 240', args.download_peers) + run_test('download-write-through', 'upload', '-1 --disk_io_write_mode=write_through', args.download_peers) + reset_download() + run_test('download-full-cache', 'upload', '-1 --disk_io_write_mode=enable_os_cache', args.download_peers) + run_test('upload', 'download', '-G -e 240', args.upload_peers) def run_test(name, test_cmd, client_arg, num_peers): @@ -59,13 +69,13 @@ def run_test(name, test_cmd, client_arg, num_peers): rm_file_or_dir('session_stats') rm_file_or_dir('session_stats_report') - start = time.time() - client_cmd = f'../examples/client_test -k --listen_interfaces=127.0.0.1:{port} cpu_benchmark.torrent ' + \ + start = time.monotonic() + client_cmd = f'../examples/client_test{exe} -k --listen_interfaces=127.0.0.1:{port} cpu_benchmark.torrent ' + \ f'--disable_hash_checks=1 --enable_dht=0 --enable_lsd=0 --enable_upnp=0 --enable_natpmp=0 ' + \ f'{client_arg} -O --allow_multiple_connections_per_ip=1 --connections_limit={num_peers*2} -T {num_peers*2} ' + \ f'-f {output_dir}/events.log --alert_mask=error,status,connect,performance_warning,storage,peer' - test_cmd = f'../examples/connection_tester {test_cmd} -c {num_peers} -d 127.0.0.1 -p {port} -t cpu_benchmark.torrent' + test_cmd = f'../examples/connection_tester{exe} {test_cmd} -c {num_peers} -d 127.0.0.1 -p {port} -t cpu_benchmark.torrent' client_out = open('%s/client.out' % output_dir, 'w+') test_out = open('%s/test.out' % output_dir, 'w+') @@ -75,20 +85,16 @@ def run_test(name, test_cmd, client_arg, num_peers): print(f'test_cmd: "{test_cmd}"') t = subprocess.Popen(test_cmd.split(' '), stdout=test_out, stderr=test_out) - if platform.system() == "Linux": - out = {} - while c.returncode is None: - capture_sample(c.pid, start, out) - time.sleep(0.1) - c.poll() - end = time.time() - - stats_filename = f"{output_dir}/memory_stats.log" - keys = print_output_to_file(out, stats_filename) - plot_output(stats_filename, keys) - else: - c.wait() - end = time.time() + out = {} + while c.returncode is None: + capture_sample(c.pid, start, out) + time.sleep(0.1) + c.poll() + end = time.monotonic() + + stats_filename = f"{output_dir}/memory_stats.log" + keys = print_output_to_file(out, stats_filename) + plot_output(stats_filename, keys) t.wait() @@ -97,7 +103,7 @@ def 
run_test(name, test_cmd, client_arg, num_peers): print('runtime %d seconds' % (end - start)) print('analyzing profile...') - os.system('gprof ../examples/client_test >%s/gprof.out' % output_dir) + os.system(f'gprof ../examples/client_test{exe} >%s/gprof.out' % output_dir) print('generating profile graph...') try: os.system('gprof2dot --strip <%s/gprof.out | dot -Tpng -o %s/cpu_profile.png' % (output_dir, output_dir)) @@ -129,7 +135,7 @@ def rm_file_or_dir(path): def parse_args(): p = argparse.ArgumentParser() p.add_argument('--toolset', default="") - p.add_argument('--download-peers', default=50, help="Number of peers to use for upload test") + p.add_argument('--download-peers', default=50, help="Number of peers to use for download test") p.add_argument('--upload-peers', default=20, help="Number of peers to use for upload test") return p.parse_args() diff --git a/tools/vmstat.py b/tools/vmstat.py new file mode 100644 index 00000000000..75796fddac1 --- /dev/null +++ b/tools/vmstat.py @@ -0,0 +1,371 @@ +# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 + +from dataclasses import dataclass +import os +import platform +import subprocess +from time import monotonic +from typing import Dict +from typing import List +from typing import Set + + +@dataclass(frozen=True) +class Metric: + axis: str + cumulative: bool + + +metrics = { + "peak_nonpaged_pool": Metric("x1y1", False), + "nonpaged_pool": Metric("x1y1", False), + "num_page_faults": Metric("x1y2", True), + "paged_pool": Metric("x1y1", False), + "peak_paged_pool": Metric("x1y1", False), + "peak_pagefile": Metric("x1y1", False), + "peak_wset": Metric("x1y1", False), + "private": Metric("x1y1", False), + "rss": Metric("x1y1", False), + "uss": Metric("x1y1", False), + "data": Metric("x1y1", False), + "shared": Metric("x1y1", False), + "text": Metric("x1y1", False), + "dirty": Metric("x1y1", False), + "lib": Metric("x1y1", False), + "vms": Metric("x1y1", False), + "other_bytes": Metric("x1y1", True), + "other_count": Metric("x1y2", True), + "read_bytes": Metric("x1y1", True), + "read_chars": Metric("x1y1", True), + "read_count": Metric("x1y2", True), + "write_bytes": Metric("x1y1", True), + "write_chars": Metric("x1y1", True), + "write_count": Metric("x1y2", True), + "pfaults": Metric("x1y2", True), + "pageins": Metric("x1y2", True), + "minor_faults": Metric("x1y2", True), + "major_faults": Metric("x1y2", True), + "pss": Metric("x1y1", False), + "pss_anon": Metric("x1y1", False), + "pss_file": Metric("x1y1", False), + "pss_shmem": Metric("x1y1", False), + "shared_clean": Metric("x1y1", False), + "shared_dirty": Metric("x1y1", False), + "private_clean": Metric("x1y1", False), + "private_dirty": Metric("x1y1", False), + "referenced": Metric("x1y1", False), + "anonymous": Metric("x1y1", False), + "lazyfree": Metric("x1y1", False), + "anonhugepages": Metric("x1y1", False), + "shmempmdmapped": Metric("x1y1", False), + "filepmdmapped": Metric("x1y1", False), + "shared_hugetlb": Metric("x1y1", False), + "private_hugetlb": Metric("x1y1", False), + "swap": Metric("x1y1", False), + "swappss": Metric("x1y1", False), + "locked": Metric("x1y1", False), +} + + +@dataclass(frozen=True) +class Plot: + name: str + title: str + ylabel: str + y2label: str + lines: List[str] + + +plots = [ + Plot( + "memory", + "libtorrent memory usage", + "Memory Size", + "", + [ + "pss", + "pss_file", + "pss_anon", + "rss", + "dirty", + "private_dirty", + "private_clean", + "lazyfree", + "anonymous", + "vms", + "private", + "paged_pool", + ], + ), + Plot( + "vm", + "libtorrent 
vm stats", + "", + "count", + [ + "pfaults", + "pageins", + "num_page_faults", + "major_faults", + "minor_faults", + ], + ), + Plot( + "io", + "libtorrent disk I/O", + "Size", + "count", + [ + "other_bytes", + "other_count", + "read_bytes", + "read_chars", + "read_count", + "write_bytes", + "write_chars", + "write_count", + ], + ), +] + +if platform.system() == "Linux": + + def capture_sample( + pid: int, start_time: int, output: Dict[str, List[float]] + ) -> None: + try: + with open(f"/proc/{pid}/smaps_rollup") as f: + sample = f.read() + with open(f"/proc/{pid}/stat") as f: + sample2 = f.read() + timestamp = monotonic() - start_time + except Exception: + return + + if "time" not in output: + time_delta = timestamp - start_time + output["time"] = [timestamp] + else: + time_delta = timestamp - output["time"][-1] + output["time"].append(timestamp) + + for line in sample.split("\n"): + if "[rollup]" in line: + continue + if line.strip() == "": + continue + + key, value = line.split(":") + val = int(value.split()[0].strip()) + key = key.strip().lower() + + if key not in output: + output[key] = [val * 1024] + else: + output[key].append(val * 1024) + + stats = sample2.split() + + def add_counter(key: str, val: float) -> None: + m = metrics[key] + if key not in output: + if m.cumulative: + output[key + "-raw"] = [val] + val = val / time_delta + output[key] = [val] + else: + + if m.cumulative: + raw_val = val + val = (val - output[key + "-raw"][-1]) / time_delta + output[key + "-raw"].append(raw_val) + output[key].append(val) + + add_counter("minor_faults", float(stats[9])) + add_counter("major_faults", float(stats[11])) + + +# example output: +# 8affffff000-7fffba926000 ---p 00000000 00:00 0 [rollup] +# Rss: 76932 kB +# Pss: 17508 kB +# Pss_Anon: 11376 kB +# Pss_File: 6101 kB +# Pss_Shmem: 30 kB +# Shared_Clean: 65380 kB +# Shared_Dirty: 88 kB +# Private_Clean: 80 kB +# Private_Dirty: 11384 kB +# Referenced: 76932 kB +# Anonymous: 11376 kB +# LazyFree: 0 kB +# AnonHugePages: 0 kB +# ShmemPmdMapped: 0 kB +# FilePmdMapped: 0 kB +# Shared_Hugetlb: 0 kB +# Private_Hugetlb: 0 kB +# Swap: 0 kB +# SwapPss: 0 kB +# Locked: 0 kB + +else: + + import psutil + + def capture_sample( + pid: int, start_time: int, output: Dict[str, List[float]] + ) -> None: + try: + p = psutil.Process(pid) + mem = p.memory_full_info() + io_cnt = p.io_counters() + timestamp = monotonic() - start_time + except Exception: + return + + if "time" not in output: + time_delta = timestamp - start_time + output["time"] = [timestamp] + else: + time_delta = timestamp - output["time"][-1] + output["time"].append(timestamp) + + for key in dir(mem): + + if key not in metrics: + if not key.startswith("_") and key not in [ + "pagefile", + "wset", + "count", + "index", + ]: + print(f"missing key: {key}") + continue + + val = getattr(mem, key) + + m = metrics[key] + if key not in output: + if m.cumulative: + output[key + "-raw"] = [val] + val = val / time_delta + output[key] = [val] + else: + if m.cumulative: + raw_val = val + val = (val - output[key + "-raw"][-1]) / time_delta + output[key + "-raw"].append(raw_val) + + output[key].append(val) + + for key in dir(io_cnt): + + if key not in metrics: + if not key.startswith("_") and key not in [ + "pagefile", + "wset", + "count", + "index", + ]: + print(f"missing key: {key}") + continue + + m = metrics[key] + if key not in output: + if m.cumulative: + output[key + "-raw"] = [val] + val = val / time_delta + output[key] = [val] + else: + if m.cumulative: + raw_val = val + val = (val - output[key + 
"-raw"][-1]) / time_delta + output[key + "-raw"].append(raw_val) + + output[key].append(val) + + +def print_output_to_file(out: Dict[str, List[int]], filename: str) -> List[str]: + + if out == {}: + return [] + + with open(filename, "w+") as stats_output: + non_zero_keys: Set[str] = set() + non_zero_keys.add("time") + keys = out.keys() + for key in keys: + stats_output.write(f"{key} ") + stats_output.write("\n") + idx = 0 + while len(out["time"]) > idx: + for key in keys: + stats_output.write(f"{out[key][idx]:f} ") + if out[key][idx] != 0: + non_zero_keys.add(key) + stats_output.write("\n") + idx += 1 + return [k if k in non_zero_keys else "" for k in keys] + + +def plot_output(filename: str, keys: List[str]) -> None: + if "time" not in keys: + return + + output_dir, in_file = os.path.split(filename) + gnuplot_file = f"{output_dir}/plot_{in_file}.gnuplot" + with open(gnuplot_file, "w+") as f: + f.write( + """set term png size 1200,700 +set format y '%.0f' +set xlabel "time (s)" +set xrange [0:*] +set yrange [2:*] +set y2range [0:*] +set logscale y 2 +set logscale y2 2 +set grid +""" + ) + + for plot in plots: + f.write( + f"""set output "{in_file}-{plot.name}.png" +set title "{plot.title}" +set ylabel "{plot.ylabel} (MB)" +set y2label "{plot.y2label}" +{"set y2tics" if plot.y2label != "" else ""} +""" + ) + + plot_string = "plot " + tidx = keys.index("time") + 1 + idx = 0 + for p in keys: + idx += 1 + if p == "time" or p == "": + continue + + if p not in plot.lines: + continue + + m = metrics[p] + + title = p.replace("_", "\\\\_") + if m.cumulative: + title += "/s" + + divider = 1 + if m.axis == "x1y1": + divider = 1024 * 1024 + + # escape underscores, since gnuplot interprets those as markup + plot_string += ( + f'"{in_file}" using {tidx}:(${idx}/{divider}) ' + + f'title "{title}" axis {m.axis} with steps, \\\n' + ) + if len(plot_string) > 5: + plot_string = plot_string[0:-4] + "\n\n" + f.write(plot_string) + + subprocess.check_output(["gnuplot", os.path.split(gnuplot_file)[1]], cwd=output_dir)