Files
transmission/libtransmission/resume.cc
Yat Ho 05aef3e787 refactor: unify quarks and strings to snake_case (#7108)
* refactor: change `leftUntilDone` to `left_until_done`

* refactor: change `magnetLink` to `magnet_link`

* refactor: change `manualAnnounceTime` to `manual_announce_time`

* refactor: change `maxConnectedPeers` to `max_connected_peers`

* refactor: change `metadataPercentComplete` to `metadata_percent_complete`

* refactor: change `peersConnected` to `peers_connected`

* refactor: change `peersFrom` to `peers_from`

* refactor: change `peersGettingFromUs` to `peers_getting_from_us`

* refactor: change `peersSendingToUs` to `peers_sending_to_us`

* refactor: change `percentComplete` to `percent_complete`

* refactor: change `percentDone` to `percent_done`

* refactor: change `pieceCount` to `piece_count`

* refactor: use quark when possible

* refactor: change `pieceSize` to `piece_size`

* refactor: change `primary-mime-type` to `primary_mime_type`

* refactor: change `rateDownload` to `rate_download`

* refactor: change `rateUpload` to `rate_upload`

* refactor: change `recheckProgress` to `recheck_progress`

* refactor: change `secondsDownloading` to `seconds_downloading`

* refactor: change `secondsSeeding` to `seconds_seeding`

* refactor: change `sizeWhenDone` to `size_when_done`

* refactor: change `startDate` to `start_date`

* refactor: change `trackerStats` to `tracker_stats`

* refactor: change `totalSize` to `total_size`

* refactor: change `torrentFile` to `torrent_file`

* refactor: change `uploadedEver` to `uploaded_ever`

* refactor: change `uploadRatio` to `upload_ratio`

* refactor: change `webseedsSendingToUs` to `webseeds_sending_to_us`

* refactor: change `bytesCompleted` to `bytes_completed`

* refactor: change `clientName` to `client_name`

* refactor: change `clientIsChoked` to `client_is_choked`

* refactor: change `clientIsInterested` to `client_is_interested`

* refactor: change `flagStr` to `flag_str`

* refactor: change `isDownloadingFrom` to `is_downloading_from`

* refactor: change `isEncrypted` to `is_encrypted`

* refactor: change `isIncoming` to `is_incoming`

* refactor: change `isUploadingTo` to `is_uploading_to`

* refactor: change `isUTP` to `is_utp`

* refactor: change `peerIsChoked` to `peer_is_choked`

* refactor: change `peerIsInterested` to `peer_is_interested`

* refactor: change `rateToClient` to `rate_to_client`

* refactor: change `rateToPeer` to `rate_to_peer`

* refactor: change `fromCache` to `from_cache`

* refactor: change `fromDht` to `from_dht`

* refactor: change `fromIncoming` to `from_incoming`

* refactor: change `fromLpd` to `from_lpd`

* refactor: change `fromLtep` to `from_ltep`

* refactor: change `fromPex` to `from_pex`

* refactor: change `fromTracker` to `from_tracker`

* refactor: change `announceState` to `announce_state`

* refactor: change `downloadCount` to `download_count`

* refactor: change `hasAnnounced` to `has_announced`

* refactor: change `hasScraped` to `has_scraped`

* refactor: change `isBackup` to `is_backup`

* refactor: change `lastAnnouncePeerCount` to `last_announce_peer_count`

* refactor: change `lastAnnounceResult` to `last_announce_result`

* refactor: change `lastAnnounceStartTime` to `last_announce_start_time`

* refactor: change `lastAnnounceSucceeded` to `last_announce_succeeded`

* refactor: change `lastAnnounceTime` to `last_announce_time`

* refactor: change `lastAnnounceTimedOut` to `last_announce_timed_out`

* refactor: change `lastScrapeResult` to `last_scrape_result`

* refactor: change `lastScrapeStartTime` to `last_scrape_start_time`

* refactor: change `lastScrapeSucceeded` to `last_scrape_succeeded`

* refactor: change `lastScrapeTime` to `last_scrape_time`

* refactor: change `lastScrapeTimedOut` to `last_scrape_timed_out`

* refactor: change `leecherCount` to `leecher_count`

* refactor: change `nextAnnounceTime` to `next_announce_time`

* refactor: change `nextScrapeTime` to `next_scrape_time`

* refactor: change `scrapeState` to `scrape_state`

* refactor: change `seederCount` to `seeder_count`

* refactor: change `torrent-added` to `torrent_added`

* refactor: change `torrent-duplicate` to `torrent_duplicate`

* refactor: change `torrent-remove` to `torrent_remove`

* refactor: change `delete-local-data` to `delete_local_data`

* refactor: change `torrent-rename-path` to `torrent_rename_path`

* refactor: change `alt-speed-down` to `alt_speed_down`

* refactor: convert `pref_toggle_entries` to quark array

* refactor: change `alt-speed-enabled` to `alt_speed_enabled`

* refactor: change `compact-view` to `compact_view`

* refactor: change `sort-reversed` to `sort_reversed`

* refactor: change `show-filterbar` to `show_filterbar`

* refactor: change `show-statusbar` to `show_statusbar`

* refactor: change `show-toolbar` to `show_toolbar`

* refactor: change `alt-speed-time-begin` to `alt_speed_time_begin`

* refactor: change `alt-speed-time-day` to `alt_speed_time_day`

* refactor: change `alt-speed-time-end` to `alt_speed_time_end`

* refactor: change `alt-speed-up` to `alt_speed_up`

* refactor: change `alt-speed-time-enabled` to `alt_speed_time_enabled`

* refactor: change `blocklist-enabled` to `blocklist_enabled`

* refactor: change `blocklist-size` to `blocklist_size`

* refactor: change `blocklist-url` to `blocklist_url`

* refactor: change `cache-size-mb` to `cache_size_mb`

* refactor: change `config-dir` to `config_dir`

* refactor: change `default-trackers` to `default_trackers`

* refactor: change `dht-enabled` to `dht_enabled`

* refactor: change `download-dir-free-space` to `download_dir_free_space`

* refactor: change `download-queue-enabled` to `download_queue_enabled`

* refactor: change `download-queue-size` to `download_queue_size`

* refactor: change `idle-seeding-limit-enabled` to `idle_seeding_limit_enabled`

* refactor: change `idle-seeding-limit` to `idle_seeding_limit`

* refactor: change `incomplete-dir-enabled` to `incomplete_dir_enabled`

* refactor: change `incomplete-dir` to `incomplete_dir`

* refactor: change `lpd-enabled` to `lpd_enabled`

* refactor: change `peer-limit-global` to `peer_limit_global`

* refactor: change `peer-limit-per-torrent` to `peer_limit_per_torrent`

* refactor: change `peer-port-random-on-start` to `peer_port_random_on_start`

* refactor: change `peer-port` to `peer_port`

* refactor: change `pex-enabled` to `pex_enabled`

* refactor: change `port-forwarding-enabled` to `port_forwarding_enabled`

* refactor: change `queue-stalled-enabled` to `queue_stalled_enabled`

* refactor: change `queue-stalled-minutes` to `queue_stalled_minutes`

* refactor: change `rename-partial-files` to `rename_partial_files`

* refactor: change `rpc-version-minimum` to `rpc_version_minimum`

* refactor: change `rpc-version-semver` to `rpc_version_semver`

* refactor: change `rpc-version` to `rpc_version`

* refactor: change `script-torrent-added-enabled` to `script_torrent_added_enabled`

* refactor: change `script-torrent-added-filename` to `script_torrent_added_filename`

* refactor: change `script-torrent-done-enabled` to `script_torrent_done_enabled`

* refactor: change `script-torrent-done-filename` to `script_torrent_done_filename`

* refactor: change `script-torrent-done-seeding-enabled` to `script_torrent_done_seeding_enabled`

* refactor: change `script-torrent-done-seeding-filename` to `script_torrent_done_seeding_filename`

* refactor: change `seed-queue-enabled` to `seed_queue_enabled`

* refactor: change `seed-queue-size` to `seed_queue_size`

* refactor: change `seedRatioLimited` to `seed_ratio_limited`

* refactor: change `session-id` to `session_id`

* refactor: change `speed-limit-down-enabled` to `speed_limit_down_enabled`

* refactor: change `speed-limit-down` to `speed_limit_down`

* refactor: change `speed-limit-up-enabled` to `speed_limit_up_enabled`

* refactor: change `speed-limit-up` to `speed_limit_up`

* refactor: change `start-added-torrents` to `start_added_torrents`

* refactor: change `trash-original-torrent-files` to `trash_original_torrent_files`

* refactor: change `utp-enabled` to `utp_enabled`

* refactor: change `tcp-enabled` to `tcp_enabled`

* docs: add missing docs for RPC `tcp_enabled`

* refactor: change `speed-units` to `speed_units`

* refactor: change `speed-bytes` to `speed_bytes`

* refactor: change `size-units` to `size_units`

* refactor: change `size-bytes` to `size_bytes`

* refactor: change `memory-units` to `memory_units`

* refactor: change `memory-bytes` to `memory_bytes`

* refactor: change `session-set` to `session_set`

* refactor: change `session-get` to `session_get`

* refactor: change `session-stats` to `session_stats`

* refactor: change `activeTorrentCount` to `active_torrent_count`

* refactor: change `downloadSpeed` to `download_speed`

* refactor: change `pausedTorrentCount` to `paused_torrent_count`

* refactor: change `torrentCount` to `torrent_count`

* refactor: change `uploadSpeed` to `upload_speed`

* refactor: change `cumulative-stats` to `cumulative_stats`

* refactor: change `current-stats` to `current_stats`

* refactor: change `uploadedBytes` and `uploaded-bytes` to `uploaded_bytes`

* refactor: change `downloadedBytes` and `downloaded-bytes` to `downloaded_bytes`

* refactor: change `filesAdded` and `files-added` to `files_added`

* refactor: change `sessionCount` and `session-count` to `session_count`

* refactor: change `secondsActive` and `seconds-active` to `seconds_active`

* refactor: change `blocklist-update` to `blocklist_update`

* refactor: change `port-test` to `port_test`

* refactor: change `session-close` to `session_close`

* refactor: change `queue-move-top` to `queue_move_top`

* refactor: change `queue-move-up` to `queue_move_up`

* refactor: change `queue-move-down` to `queue_move_down`

* refactor: change `queue-move-bottom` to `queue_move_bottom`

* refactor: change `free-space` to `free_space`

* refactor: change `group-set` to `group_set`

* refactor: change `group-get` to `group_get`

* refactor: change `announce-ip` to `announce_ip`

* refactor: change `announce-ip-enabled` to `announce_ip_enabled`

* refactor: change `upload-slots-per-torrent` to `upload_slots_per_torrent`

* refactor: change `trash-can-enabled` to `trash_can_enabled`

* refactor: change `watch-dir-enabled` to `watch_dir_enabled`

* refactor: change `watch-dir-force-generic` to `watch_dir_force_generic`

* refactor: change `watch-dir` to `watch_dir`

* refactor: change `message-level` to `message_level`

* refactor: change `scrape-paused-torrents-enabled` to `scrape_paused_torrents_enabled`

* refactor: change `torrent-added-verify-mode` to `torrent_added_verify_mode`

* refactor: change `sleep-per-seconds-during-verify` to `sleep_per_seconds_during_verify`

* refactor: change `bind-address-ipv4` to `bind_address_ipv4`

* refactor: change `bind-address-ipv6` to `bind_address_ipv6`

* refactor: change `peer-congestion-algorithm` to `peer_congestion_algorithm`

* refactor: change `peer-socket-tos` to `peer_socket_tos`

* refactor: change `peer-port-random-high` to `peer_port_random_high`

* refactor: change `peer-port-random-low` to `peer_port_random_low`

* refactor: change `anti-brute-force-enabled` to `anti_brute_force_enabled`

* refactor: change `rpc-authentication-required` to `rpc_authentication_required`

* refactor: change `rpc-bind-address` to `rpc_bind_address`

* refactor: change `rpc-enabled` to `rpc_enabled`

* refactor: change `rpc-host-whitelist` to `rpc_host_whitelist`

* refactor: change `rpc-host-whitelist-enabled` to `rpc_host_whitelist_enabled`

* refactor: change `rpc-password` to `rpc_password`

* refactor: change `rpc-port` to `rpc_port`

* refactor: change `rpc-socket-mode` to `rpc_socket_mode`

* refactor: change `rpc-url` to `rpc_url`

* refactor: change `rpc-username` to `rpc_username`

* refactor: change `rpc-whitelist` to `rpc_whitelist`

* refactor: change `rpc-whitelist-enabled` to `rpc_whitelist_enabled`

* refactor: change `ratio-limit-enabled` to `ratio_limit_enabled`

* refactor: change `ratio-limit` to `ratio_limit`

* refactor: change `show-options-window` to `show_options_window`

* refactor: change `open-dialog-dir` to `open_dialog_dir`

* refactor: change `inhibit-desktop-hibernation` to `inhibit_desktop_hibernation`

* refactor: change `show-notification-area-icon` to `show_notification_area_icon`

* refactor: change `start-minimized` to `start_minimized`

* refactor: change `torrent-added-notification-enabled` to `torrent_added_notification_enabled`

* refactor: change `anti-brute-force-threshold` to `anti_brute_force_threshold`

* refactor: change `torrent-complete-notification-enabled` to `torrent_complete_notification_enabled`

* refactor: change `prompt-before-exit` to `prompt_before_exit`

* refactor: change `sort-mode` to `sort_mode`

* refactor: change `statusbar-stats` to `statusbar_stats`

* refactor: change `show-extra-peer-details` to `show_extra_peer_details`

* refactor: change `show-backup-trackers` to `show_backup_trackers`

* refactor: change `blocklist-date` to `blocklist_date`

* refactor: change `blocklist-updates-enabled` to `blocklist_updates_enabled`

* refactor: change `main-window-layout-order` to `main_window_layout_order`

* refactor: change `main-window-height` to `main_window_height`

* refactor: change `main-window-width` to `main_window_width`

* refactor: change `main-window-x` to `main_window_x`

* refactor: change `main-window-y` to `main_window_y`

* refactor: change `filter-mode` to `filter_mode`

* refactor: change `filter-trackers` to `filter_trackers`

* refactor: change `filter-text` to `filter_text`

* refactor: change `remote-session-enabled` to `remote_session_enabled`

* refactor: change `remote-session-host` to `remote_session_host`

* refactor: change `remote-session-https` to `remote_session_https`

* refactor: change `remote-session-password` to `remote_session_password`

* refactor: change `remote-session-port` to `remote_session_port`

* refactor: change `remote-session-requres-authentication` to `remote_session_requires_authentication`

* refactor: change `remote-session-username` to `remote_session_username`

* refactor: change `torrent-complete-sound-command` to `torrent_complete_sound_command`

* refactor: change `torrent-complete-sound-enabled` to `torrent_complete_sound_enabled`

* refactor: change `user-has-given-informed-consent` to `user_has_given_informed_consent`

* refactor: change `read-clipboard` to `read_clipboard`

* refactor: change `details-window-height` to `details_window_height`

* refactor: change `details-window-width` to `details_window_width`

* refactor: change `main-window-is-maximized` to `main_window_is_maximized`

* refactor: change `port-is-open` to `port_is_open`

* refactor: change `show-tracker-scrapes` to `show_tracker_scrapes`

* refactor: change `max-peers` to `max_peers`

* refactor: change `peers2-6` to `peers2_6`

* refactor: change `seeding-time-seconds` to `seeding_time_seconds`

* refactor: change `downloading-time-seconds` to `downloading_time_seconds`

* refactor: change `ratio-mode` to `ratio_mode`

* refactor: change `idle-limit` to `idle_limit`

* refactor: change `idle-mode` to `idle_mode`

* refactor: change `speed-Bps` to `speed_Bps`

* refactor: change `use-global-speed-limit` to `use_global_speed_limit`

* refactor: change `use-speed-limit` to `use_speed_limit`

* chore: remove TODO comment

* docs: add upgrade instructions to `5.0.0`

* chore: bump rpc semver major version

* chore: housekeeping
2025-12-01 16:08:18 -06:00

996 lines
29 KiB
C++

// This file Copyright © Mnemosyne LLC.
// It may be used under GPLv2 (SPDX: GPL-2.0-only), GPLv3 (SPDX: GPL-3.0-only),
// or any future license endorsed by Mnemosyne LLC.
// License text can be found in the licenses/ folder.
#include <algorithm> // std::min
#include <array>
#include <cstdint>
#include <cstring>
#include <ctime>
#include <limits>
#include <string_view>
#include <vector>
#include <fmt/format.h>
#include "libtransmission/transmission.h"
#include "libtransmission/bitfield.h"
#include "libtransmission/error.h"
#include "libtransmission/file.h"
#include "libtransmission/log.h"
#include "libtransmission/net.h"
#include "libtransmission/peer-mgr.h" /* pex */
#include "libtransmission/quark.h"
#include "libtransmission/resume.h"
#include "libtransmission/session.h"
#include "libtransmission/torrent-ctor.h"
#include "libtransmission/torrent-metainfo.h"
#include "libtransmission/torrent.h"
#include "libtransmission/tr-assert.h"
#include "libtransmission/utils.h"
#include "libtransmission/variant.h"
using namespace std::literals;
using namespace libtransmission::Values;
namespace tr_resume
{
namespace
{
// Upper bound on how many peers (per address family) are saved to,
// and re-added from, a .resume file.
constexpr auto MaxRememberedPeers = 200U;
// ---
// Serialize the torrent's current interesting peers (IPv4 and IPv6)
// into the resume map so they can be re-added next session.
void save_peers(tr_variant::Map& map, tr_torrent const* tor)
{
    auto const pex4 = tr_peerMgrGetPeers(tor, TR_AF_INET, TR_PEERS_INTERESTING, MaxRememberedPeers);
    if (!std::empty(pex4))
    {
        map.insert_or_assign(TR_KEY_peers2, tr_pex::to_variant(std::data(pex4), std::size(pex4)));
    }

    auto const pex6 = tr_peerMgrGetPeers(tor, TR_AF_INET6, TR_PEERS_INTERESTING, MaxRememberedPeers);
    if (!std::empty(pex6))
    {
        map.insert_or_assign(TR_KEY_peers2_6, tr_pex::to_variant(std::data(pex6), std::size(pex6)));
    }
}
// Re-add peers that were saved in a resume file; returns how many the
// peer manager accepted. The list is capped at MaxRememberedPeers.
size_t add_peers(tr_torrent* tor, tr_variant::Vector const& l)
{
    auto const n_max = size_t{ MaxRememberedPeers };
    auto const pex = tr_pex::from_variant(std::data(l), std::min(std::size(l), n_max));
    return tr_peerMgrAddPex(tor, TR_PEER_FROM_RESUME, std::data(pex), std::size(pex));
}
// Load previously remembered IPv4 and IPv6 peers from the resume map.
// Returns tr_resume::Peers if either list was present.
auto load_peers(tr_variant::Map const& map, tr_torrent* tor)
{
    auto loaded = tr_resume::fields_t{};

    if (auto const* const peers = map.find_if<tr_variant::Vector>(TR_KEY_peers2); peers != nullptr)
    {
        auto const n = add_peers(tor, *peers);
        tr_logAddTraceTor(tor, fmt::format("Loaded {} IPv4 peers from resume file", n));
        loaded = tr_resume::Peers;
    }

    // the IPv6 key also has a legacy kebab-case spelling
    if (auto const* const peers = map.find_if<tr_variant::Vector>({ TR_KEY_peers2_6, TR_KEY_peers2_6_kebab });
        peers != nullptr)
    {
        auto const n = add_peers(tor, *peers);
        tr_logAddTraceTor(tor, fmt::format("Loaded {} IPv6 peers from resume file", n));
        loaded = tr_resume::Peers;
    }

    return loaded;
}
// ---
// Persist the torrent's user-assigned labels as a list of strings.
void save_labels(tr_variant::Map& map, tr_torrent const* tor)
{
    auto const& labels = tor->labels();
    auto label_vars = tr_variant::Vector{};
    label_vars.reserve(std::size(labels));
    for (auto const& label : labels)
    {
        label_vars.emplace_back(tr_variant::unmanaged_string(label.sv()));
    }
    map.insert_or_assign(TR_KEY_labels, std::move(label_vars));
}
// Restore the torrent's labels from the resume map.
// Empty or non-string entries are silently skipped.
tr_resume::fields_t load_labels(tr_variant::Map const& map, tr_torrent* tor)
{
    auto const* const vec = map.find_if<tr_variant::Vector>(TR_KEY_labels);
    if (vec == nullptr)
    {
        return {};
    }

    auto parsed = tr_torrent::labels_t{};
    parsed.reserve(std::size(*vec));
    for (auto const& item : *vec)
    {
        auto const sv = item.value_if<std::string_view>();
        if (sv && !std::empty(*sv))
        {
            parsed.emplace_back(*sv);
        }
    }

    tor->set_labels(parsed);
    return tr_resume::Labels;
}
// ---
// Persist the torrent's bandwidth-group name under the `group` key.
void save_group(tr_variant::Map& map, tr_torrent const* tor)
{
    map.insert_or_assign(TR_KEY_group, tr_variant::unmanaged_string(tor->bandwidth_group()));
}
// Restore the torrent's bandwidth group. A missing or empty value is
// treated as "no group saved" and leaves the torrent unchanged.
tr_resume::fields_t load_group(tr_variant::Map const& map, tr_torrent* tor)
{
    auto const group_name = map.value_if<std::string_view>(TR_KEY_group);
    if (!group_name || std::empty(*group_name))
    {
        return {};
    }

    tor->set_bandwidth_group(*group_name);
    return tr_resume::Group;
}
// ---
// Persist per-file "do not download" flags: one bool per file,
// true when the file is NOT wanted.
void save_dnd(tr_variant::Map& map, tr_torrent const* tor)
{
    auto const n_files = tor->file_count();
    auto flags = tr_variant::Vector{};
    flags.reserve(n_files);
    for (tr_file_index_t idx = 0U; idx < n_files; ++idx)
    {
        // `dnd` is the inverse of "wanted"
        flags.emplace_back(!tr_torrentFile(tor, idx).wanted);
    }
    map.insert_or_assign(TR_KEY_dnd, std::move(flags));
}
// Restore per-file "do not download" flags. The saved list must have
// exactly one entry per file or it is rejected wholesale.
tr_resume::fields_t load_dnd(tr_variant::Map const& map, tr_torrent* tor)
{
    auto const* const flags = map.find_if<tr_variant::Vector>(TR_KEY_dnd);
    if (flags == nullptr)
    {
        tr_logAddDebugTor(tor, "Couldn't load DND flags.");
        return {};
    }

    auto const n_files = tor->file_count();
    if (std::size(*flags) != n_files)
    {
        tr_logAddDebugTor(
            tor,
            fmt::format(
                "Couldn't load DND flags. DND list {} has {} children; torrent has {} files",
                fmt::ptr(flags),
                std::size(*flags),
                n_files));
        return {};
    }

    // partition file indices into wanted / skipped buckets
    auto wanted_files = std::vector<tr_file_index_t>{};
    auto skipped_files = std::vector<tr_file_index_t>{};
    wanted_files.reserve(n_files);
    skipped_files.reserve(n_files);
    for (tr_file_index_t idx = 0U; idx < n_files; ++idx)
    {
        // a non-bool entry counts as "wanted"
        auto const is_dnd = (*flags)[idx].value_if<bool>().value_or(false);
        (is_dnd ? skipped_files : wanted_files).push_back(idx);
    }

    tor->init_files_wanted(std::data(skipped_files), std::size(skipped_files), false);
    tor->init_files_wanted(std::data(wanted_files), std::size(wanted_files), true);
    return tr_resume::Dnd;
}
// ---
// Persist per-file download priorities, one entry per file.
void save_file_priorities(tr_variant::Map& map, tr_torrent const* tor)
{
    auto const n_files = tor->file_count();
    auto priorities = tr_variant::Vector{};
    priorities.reserve(n_files);
    for (tr_file_index_t idx = 0U; idx < n_files; ++idx)
    {
        priorities.emplace_back(tr_torrentFile(tor, idx).priority);
    }
    map.insert_or_assign(TR_KEY_priority, std::move(priorities));
}
// Restore per-file download priorities. The saved list must have
// exactly one entry per file or it is ignored entirely.
tr_resume::fields_t load_file_priorities(tr_variant::Map const& map, tr_torrent* tor)
{
    auto const n_files = tor->file_count();
    auto const* const priorities = map.find_if<tr_variant::Vector>(TR_KEY_priority);
    if (priorities == nullptr || std::size(*priorities) != n_files)
    {
        return {};
    }

    for (tr_file_index_t idx = 0U; idx < n_files; ++idx)
    {
        // non-integer entries leave that file's priority unchanged
        auto const val = (*priorities)[idx].value_if<int64_t>();
        if (val)
        {
            tor->set_file_priority(idx, static_cast<tr_priority_t>(*val));
        }
    }

    return tr_resume::FilePriorities;
}
// ---
// Build the nested dict describing one direction's speed-limit state:
// the limit in bytes/sec plus the two "which limit applies" flags.
tr_variant::Map save_single_speed_limit(tr_torrent const* tor, tr_direction dir)
{
    auto limits = tr_variant::Map{ 3 };
    limits.try_emplace(TR_KEY_speed_Bps, tor->speed_limit(dir).base_quantity());
    limits.try_emplace(TR_KEY_use_global_speed_limit, tor->uses_session_limits());
    limits.try_emplace(TR_KEY_use_speed_limit, tor->uses_speed_limit(dir));
    return limits;
}
// Persist both directions' speed-limit settings as nested dicts.
void save_speed_limits(tr_variant::Map& map, tr_torrent const* tor)
{
    map.insert_or_assign(TR_KEY_speed_limit_down, save_single_speed_limit(tor, TR_DOWN));
    map.insert_or_assign(TR_KEY_speed_limit_up, save_single_speed_limit(tor, TR_UP));
}
// Persist the seeding-ratio settings as a nested dict keyed by
// `ratio_limit` (the outer key reuses the inner limit's quark).
void save_ratio_limits(tr_variant::Map& map, tr_torrent const* tor)
{
    auto settings = tr_variant::Map{ 2 };
    settings.try_emplace(TR_KEY_ratio_limit, tor->seed_ratio());
    settings.try_emplace(TR_KEY_ratio_mode, tor->seed_ratio_mode());
    map.insert_or_assign(TR_KEY_ratio_limit, std::move(settings));
}
// Persist the idle-seeding settings as a nested dict keyed by
// `idle_limit` (the outer key reuses the inner limit's quark).
void save_idle_limits(tr_variant::Map& map, tr_torrent const* tor)
{
    auto settings = tr_variant::Map{ 2 };
    settings.try_emplace(TR_KEY_idle_limit, tor->idle_limit_minutes());
    settings.try_emplace(TR_KEY_idle_mode, tor->idle_limit_mode());
    map.insert_or_assign(TR_KEY_idle_limit, std::move(settings));
}
// Apply one direction's saved speed-limit settings to the torrent.
void load_single_speed_limit(tr_variant::Map const& map, tr_direction dir, tr_torrent* tor)
{
    // newer files store the limit in bytes/sec (snake- or kebab-case key)...
    if (auto const byps = map.value_if<int64_t>({ TR_KEY_speed_Bps, TR_KEY_speed_Bps_kebab }); byps)
    {
        tor->set_speed_limit(dir, Speed{ *byps, Speed::Units::Byps });
    }
    // ...older ones stored KB/sec under `speed`
    else if (auto const kbyps = map.value_if<int64_t>(TR_KEY_speed); kbyps)
    {
        tor->set_speed_limit(dir, Speed{ *kbyps, Speed::Units::KByps });
    }

    if (auto const use_limit = map.value_if<bool>({ TR_KEY_use_speed_limit, TR_KEY_use_speed_limit_kebab }); use_limit)
    {
        tor->use_speed_limit(dir, *use_limit);
    }

    if (auto const use_session = map.value_if<bool>({ TR_KEY_use_global_speed_limit, TR_KEY_use_global_speed_limit_kebab });
        use_session)
    {
        tr_torrentUseSessionLimits(tor, *use_session);
    }
}
// Restore per-torrent speed limits for both directions.
// Returns tr_resume::Speedlimit if either direction's dict was present.
auto load_speed_limits(tr_variant::Map const& map, tr_torrent* tor)
{
    auto loaded = tr_resume::fields_t{};

    if (auto const* limits = map.find_if<tr_variant::Map>({ TR_KEY_speed_limit_down, TR_KEY_speed_limit_down_kebab });
        limits != nullptr)
    {
        load_single_speed_limit(*limits, TR_DOWN, tor);
        loaded = tr_resume::Speedlimit;
    }

    if (auto const* limits = map.find_if<tr_variant::Map>({ TR_KEY_speed_limit_up, TR_KEY_speed_limit_up_kebab });
        limits != nullptr)
    {
        load_single_speed_limit(*limits, TR_UP, tor);
        loaded = tr_resume::Speedlimit;
    }

    return loaded;
}
// Restore the seeding-ratio limit and mode from the nested
// `ratio_limit` dict (legacy kebab-case spelling also accepted).
tr_resume::fields_t load_ratio_limits(tr_variant::Map const& map, tr_torrent* tor)
{
    auto const* const settings = map.find_if<tr_variant::Map>({ TR_KEY_ratio_limit, TR_KEY_ratio_limit_kebab });
    if (settings == nullptr)
    {
        return {};
    }

    if (auto const ratio = settings->value_if<double>({ TR_KEY_ratio_limit, TR_KEY_ratio_limit_kebab }); ratio)
    {
        tor->set_seed_ratio(*ratio);
    }

    if (auto const mode = settings->value_if<int64_t>({ TR_KEY_ratio_mode, TR_KEY_ratio_mode_kebab }); mode)
    {
        tor->set_seed_ratio_mode(static_cast<tr_ratiolimit>(*mode));
    }

    return tr_resume::Ratiolimit;
}
// Restore the idle-seeding limit and mode from the nested
// `idle_limit` dict (legacy kebab-case spelling also accepted).
tr_resume::fields_t load_idle_limits(tr_variant::Map const& map, tr_torrent* tor)
{
    auto const* const settings = map.find_if<tr_variant::Map>({ TR_KEY_idle_limit, TR_KEY_idle_limit_kebab });
    if (settings == nullptr)
    {
        return {};
    }

    if (auto const minutes = settings->value_if<int64_t>({ TR_KEY_idle_limit, TR_KEY_idle_limit_kebab }); minutes)
    {
        tor->set_idle_limit_minutes(*minutes);
    }

    if (auto const mode = settings->value_if<int64_t>({ TR_KEY_idle_mode, TR_KEY_idle_mode_kebab }); mode)
    {
        tor->set_idle_limit_mode(static_cast<tr_idlelimit>(*mode));
    }

    return tr_resume::Idlelimit;
}
// ---
// Persist the torrent's display name (which may have been renamed by the user).
void save_name(tr_variant::Map& map, tr_torrent const* tor)
{
    map.insert_or_assign(TR_KEY_name, tr_variant::unmanaged_string(tor->name()));
}
// Restore the torrent's display name. Whitespace is stripped; a
// missing or effectively-empty name leaves the torrent unchanged.
tr_resume::fields_t load_name(tr_variant::Map const& map, tr_torrent* tor)
{
    if (auto const saved = map.value_if<std::string_view>(TR_KEY_name); saved)
    {
        if (auto const name = tr_strv_strip(*saved); !std::empty(name))
        {
            tor->set_name(name);
            return tr_resume::Name;
        }
    }

    return {};
}
// ---
// Persist each file's subpath so user renames survive a restart.
void save_filenames(tr_variant::Map& map, tr_torrent const* tor)
{
    auto const n_files = tor->file_count();
    auto subpaths = tr_variant::Vector{};
    subpaths.reserve(n_files);
    for (tr_file_index_t idx = 0U; idx < n_files; ++idx)
    {
        subpaths.emplace_back(tr_variant::unmanaged_string(tor->file_subpath(idx)));
    }
    map.insert_or_assign(TR_KEY_files, std::move(subpaths));
}
// Restore saved per-file subpaths. Entries beyond the torrent's file
// count and empty/non-string entries are skipped.
tr_resume::fields_t load_filenames(tr_variant::Map const& map, tr_torrent* tor)
{
    auto const* const names = map.find_if<tr_variant::Vector>(TR_KEY_files);
    if (names == nullptr)
    {
        return {};
    }

    auto const n_files = tor->file_count();
    auto const n_names = std::size(*names);
    for (tr_file_index_t idx = 0U; idx < n_files && idx < n_names; ++idx)
    {
        auto const subpath = (*names)[idx].value_if<std::string_view>();
        if (subpath && !std::empty(*subpath))
        {
            tor->set_file_subpath(idx, *subpath);
        }
    }

    return tr_resume::Filenames;
}
// ---
// Encode a bitfield compactly: the all-clear and all-set cases become
// the strings "none" / "all"; anything else is stored as raw bytes.
tr_variant bitfield_to_raw(tr_bitfield const& b)
{
    auto const is_none = b.has_none() || std::empty(b);
    if (is_none)
    {
        return tr_variant::unmanaged_string("none"sv);
    }

    return b.has_all() ? tr_variant::unmanaged_string("all"sv) : tr_variant::make_raw(b.raw());
}
// Decode a bitfield written by bitfield_to_raw(): the sentinel strings
// "all" / "none" (or an empty value) set the whole field; anything
// else is interpreted as raw bitfield bytes.
void raw_to_bitfield(tr_bitfield& bitfield, std::string_view const raw)
{
    if (raw == "all"sv)
    {
        bitfield.set_has_all();
    }
    else if (std::empty(raw) || raw == "none"sv)
    {
        bitfield.set_has_none();
    }
    else
    {
        bitfield.set_raw(reinterpret_cast<uint8_t const*>(std::data(raw)), std::size(raw));
    }
}
// Persist verification + download progress as a nested `progress` dict:
// per-file mtimes, the checked-pieces bitfield, and the blocks bitfield.
void save_progress(tr_variant::Map& map, tr_torrent::ResumeHelper const& helper)
{
    auto prog = tr_variant::Map{ 3 };

    // per-file mtimes, used on load to detect files changed behind our back
    auto const& mtimes = helper.file_mtimes();
    auto mtimes_list = tr_variant::Vector{};
    mtimes_list.reserve(std::size(mtimes));
    for (auto const& mtime : mtimes)
    {
        mtimes_list.emplace_back(mtime);
    }
    prog.try_emplace(TR_KEY_mtimes, std::move(mtimes_list));

    // which pieces have been verified
    prog.try_emplace(TR_KEY_pieces, bitfield_to_raw(helper.checked_pieces()));

    // which blocks have been downloaded
    prog.try_emplace(TR_KEY_blocks, bitfield_to_raw(helper.blocks()));

    map.insert_or_assign(TR_KEY_progress, std::move(prog));
}
/*
* Transmission has iterated through a few strategies here, so the
* code has some added complexity to support older approaches.
*
* Current approach: 'progress' is a dict with two entries:
* - 'pieces' a bitfield for whether each piece has been checked.
* - 'mtimes', an array of per-file timestamps
* On startup, 'pieces' is loaded. Then we check to see if the disk
* mtimes differ from the 'mtimes' list. Changed files have their
* pieces cleared from the bitset.
*
* Second approach (2.20 - 3.00): the 'progress' dict had a
* 'time_checked' entry which was a list with file_count items.
* Each item was either a list of per-piece timestamps, or a
* single timestamp if either all or none of the pieces had been
* tested more recently than the file's mtime.
*
* First approach (pre-2.20) had an "mtimes" list identical to
* the current approach, but not the 'pieces' bitfield.
*/
// Restore verification + download progress from the resume file's
// `progress` dict; see the format history in the comment above.
// Returns tr_resume::Progress whenever the dict exists, even if parts
// of it turn out to be unusable (those parts trigger a re-verify).
tr_resume::fields_t load_progress(tr_variant::Map const& map, tr_torrent* tor, tr_torrent::ResumeHelper& helper)
{
    auto const* const prog = map.find_if<tr_variant::Map>(TR_KEY_progress);
    if (prog == nullptr)
    {
        return {};
    }
    /// CHECKED PIECES
    auto checked = tr_bitfield{ tor->piece_count() };
    auto mtimes = std::vector<time_t>{};
    auto const n_files = tor->file_count();
    mtimes.reserve(n_files);
    // try to load mtimes
    if (auto const* l = prog->find_if<tr_variant::Vector>(TR_KEY_mtimes); l != nullptr)
    {
        for (auto const& var : *l)
        {
            auto const t = var.value_if<int64_t>();
            if (!t)
            {
                // stop at the first non-integer entry; the size check
                // below pads any shortfall with zeroes
                break;
            }
            mtimes.push_back(*t);
        }
    }
    // try to load the piece-checked bitfield
    if (auto const sv = prog->value_if<std::string_view>(TR_KEY_pieces); sv)
    {
        raw_to_bitfield(checked, *sv);
    }
    // maybe it's a .resume file from [2.20 - 3.00] with the per-piece mtimes
    if (auto const* l = prog->find_if<tr_variant::Vector>(TR_KEY_time_checked); l != nullptr)
    {
        for (tr_file_index_t fi = 0, n_l = std::min(n_files, std::size(*l)); fi < n_l; ++fi)
        {
            auto const& b = (*l)[fi];
            auto time_checked = time_t{};
            if (auto const t = b.value_if<int64_t>(); t)
            {
                // a single timestamp covering the whole file
                time_checked = static_cast<time_t>(*t);
            }
            else if (auto const* ll = b.get_if<tr_variant::Vector>(); ll != nullptr)
            {
                // The first element (idx 0) stores a base value for all piece timestamps,
                // which would be the value of the smallest piece timestamp minus 1.
                //
                // The rest of the elements are the timestamp of each piece, stored as
                // an offset to the base value.
                // i.e. idx 1 <-> piece 0, idx 2 <-> piece 1, ...
                // timestamp of piece n = idx 0 + idx n+1
                //
                // Pieces that haven't been checked will have a timestamp offset of 0.
                // They can be differentiated from the oldest checked piece(s) since the
                // offset for any checked pieces will be at least 1.
                auto const base = (*ll)[0].value_if<int64_t>().value_or(0);
                auto const [piece_begin, piece_end] = tor->piece_span_for_file(fi);
                auto const n_ll = std::size(*ll);
                auto const n_pieces = piece_end - piece_begin;
                // use the oldest piece timestamp as the file's timestamp;
                // an unchecked piece (offset 0) short-circuits the scan to 0
                time_checked = std::numeric_limits<time_t>::max();
                for (tr_piece_index_t i = 1; time_checked > time_t{} && i <= n_pieces && i < n_ll; ++i)
                {
                    auto const offset = (*ll)[i].value_if<int64_t>().value_or(0);
                    time_checked = std::min(time_checked, offset != 0 ? static_cast<time_t>(base + offset) : time_t{});
                }
            }
            mtimes.push_back(time_checked);
        }
    }
    if (std::size(mtimes) != n_files)
    {
        tr_logAddDebugTor(tor, fmt::format("Couldn't load mtimes: expected {} got {}", std::size(mtimes), n_files));
        // if resizing grows the vector, we'll get 0 mtimes for the
        // new items which is exactly what we want since the pieces
        // in an unknown state should be treated as untested
        mtimes.resize(n_files);
    }
    helper.load_checked_pieces(checked, std::data(mtimes));
    /// COMPLETION
    auto blocks = tr_bitfield{ tor->block_count() };
    char const* err = nullptr;
    if (auto const b = prog->find(TR_KEY_blocks); b != std::end(*prog))
    {
        if (auto const sv = b->second.value_if<std::string_view>(); sv)
        {
            raw_to_bitfield(blocks, *sv);
        }
        else
        {
            err = "Invalid value for 'blocks'";
        }
    }
    // legacy pre-'blocks' files stored the raw bytes under 'bitfield'
    else if (auto const raw = prog->value_if<std::string_view>(TR_KEY_bitfield); raw)
    {
        blocks.set_raw(reinterpret_cast<uint8_t const*>(std::data(*raw)), std::size(*raw));
    }
    else
    {
        err = "Couldn't find 'blocks' or 'bitfield'";
    }
    if (err != nullptr)
    {
        // no usable completion info; leave `blocks` unloaded so the
        // torrent gets re-verified
        tr_logAddDebugTor(tor, fmt::format("Torrent needs to be verified - {}", err));
    }
    else
    {
        helper.load_blocks(blocks);
    }
    return tr_resume::Progress;
}
// ---
// Read the torrent's benc-encoded .resume file and apply the requested
// fields to `tor` / `helper`.
//
// Only fields named in `fields_to_load` are considered; the return value
// is the subset of those fields that were actually present in the file
// and applied. Missing file / parse failure returns an empty field set.
//
// Many keys are looked up under two quarks (snake_case plus a legacy
// kebab-case variant) so that resume files written by older versions
// still load.
tr_resume::fields_t load_from_file(tr_torrent* tor, tr_torrent::ResumeHelper& helper, tr_resume::fields_t fields_to_load)
{
    TR_ASSERT(tr_isTorrent(tor));

    // migrate any legacy resume file to the current on-disk naming scheme
    // (presumably name-keyed -> hash-keyed; see tr_torrent_metainfo::migrate_file)
    tr_torrent_metainfo::migrate_file(tor->session->resumeDir(), tor->name(), tor->info_hash_string(), ".resume"sv);

    auto const filename = tor->resume_file();
    auto benc = std::vector<char>{};
    if (!tr_sys_path_exists(filename) || !tr_file_read(filename, benc))
    {
        return {};
    }

    // parse in place; `benc` must stay alive while `otop` is in use
    auto serde = tr_variant_serde::benc();
    auto otop = serde.inplace().parse(benc);
    if (!otop)
    {
        tr_logAddDebugTor(tor, fmt::format("Couldn't read '{}': {}", filename, serde.error_.message()));
        return {};
    }

    // the top-level variant must be a benc dict
    auto const* const p_map = otop->get_if<tr_variant::Map>();
    if (p_map == nullptr)
    {
        tr_logAddDebugTor(tor, fmt::format("Resume file '{}' does not contain a benc dict", filename));
        return {};
    }
    auto const& map = *p_map;

    tr_logAddDebugTor(tor, fmt::format("Read resume file '{}'", filename));

    auto fields_loaded = tr_resume::fields_t{};

    if ((fields_to_load & tr_resume::Corrupt) != 0)
    {
        if (auto i = map.value_if<int64_t>(TR_KEY_corrupt); i)
        {
            tor->bytes_corrupt_.set_prev(*i);
            fields_loaded |= tr_resume::Corrupt;
        }
    }

    // the download dir is also needed when loading progress,
    // hence the combined mask
    if ((fields_to_load & (tr_resume::Progress | tr_resume::DownloadDir)) != 0)
    {
        if (auto sv = map.value_if<std::string_view>(TR_KEY_destination); sv && !std::empty(*sv))
        {
            helper.load_download_dir(*sv);
            fields_loaded |= tr_resume::DownloadDir;
        }
    }

    // likewise, the incomplete dir affects where progress lookups resolve
    if ((fields_to_load & (tr_resume::Progress | tr_resume::IncompleteDir)) != 0)
    {
        if (auto sv = map.value_if<std::string_view>({ TR_KEY_incomplete_dir, TR_KEY_incomplete_dir_kebab });
            sv && !std::empty(*sv))
        {
            helper.load_incomplete_dir(*sv);
            fields_loaded |= tr_resume::IncompleteDir;
        }
    }

    if ((fields_to_load & tr_resume::Downloaded) != 0)
    {
        if (auto i = map.value_if<int64_t>(TR_KEY_downloaded); i)
        {
            tor->bytes_downloaded_.set_prev(*i);
            fields_loaded |= tr_resume::Downloaded;
        }
    }

    if ((fields_to_load & tr_resume::Uploaded) != 0)
    {
        if (auto i = map.value_if<int64_t>(TR_KEY_uploaded); i)
        {
            tor->bytes_uploaded_.set_prev(*i);
            fields_loaded |= tr_resume::Uploaded;
        }
    }

    if ((fields_to_load & tr_resume::MaxPeers) != 0)
    {
        if (auto i = map.value_if<int64_t>({ TR_KEY_max_peers, TR_KEY_max_peers_kebab }); i)
        {
            tor->set_peer_limit(static_cast<uint16_t>(*i));
            fields_loaded |= tr_resume::MaxPeers;
        }
    }

    if ((fields_to_load & tr_resume::Run) != 0)
    {
        if (auto b = map.value_if<bool>(TR_KEY_paused); b)
        {
            // stored as "paused", applied as its inverse
            helper.load_start_when_stable(!*b);
            fields_loaded |= tr_resume::Run;
        }
    }

    if ((fields_to_load & tr_resume::AddedDate) != 0)
    {
        if (auto i = map.value_if<int64_t>({ TR_KEY_added_date, TR_KEY_added_date_kebab }); i)
        {
            helper.load_date_added(static_cast<time_t>(*i));
            fields_loaded |= tr_resume::AddedDate;
        }
    }

    if ((fields_to_load & tr_resume::DoneDate) != 0)
    {
        if (auto i = map.value_if<int64_t>({ TR_KEY_done_date, TR_KEY_done_date_kebab }); i)
        {
            helper.load_date_done(static_cast<time_t>(*i));
            fields_loaded |= tr_resume::DoneDate;
        }
    }

    if ((fields_to_load & tr_resume::ActivityDate) != 0)
    {
        if (auto i = map.value_if<int64_t>({ TR_KEY_activity_date, TR_KEY_activity_date_kebab }); i)
        {
            tor->set_date_active(*i);
            fields_loaded |= tr_resume::ActivityDate;
        }
    }

    if ((fields_to_load & tr_resume::TimeSeeding) != 0)
    {
        if (auto i = map.value_if<int64_t>({ TR_KEY_seeding_time_seconds, TR_KEY_seeding_time_seconds_kebab }); i)
        {
            helper.load_seconds_seeding_before_current_start(*i);
            fields_loaded |= tr_resume::TimeSeeding;
        }
    }

    if ((fields_to_load & tr_resume::TimeDownloading) != 0)
    {
        if (auto i = map.value_if<int64_t>({ TR_KEY_downloading_time_seconds, TR_KEY_downloading_time_seconds_kebab }); i)
        {
            helper.load_seconds_downloading_before_current_start(*i);
            fields_loaded |= tr_resume::TimeDownloading;
        }
    }

    if ((fields_to_load & tr_resume::BandwidthPriority) != 0)
    {
        // ignore out-of-range priority values
        if (auto i = map.value_if<int64_t>({ TR_KEY_bandwidth_priority, TR_KEY_bandwidth_priority_kebab });
            i && tr_isPriority(static_cast<tr_priority_t>(*i)))
        {
            tr_torrentSetPriority(tor, static_cast<tr_priority_t>(*i));
            fields_loaded |= tr_resume::BandwidthPriority;
        }
    }

    if ((fields_to_load & tr_resume::SequentialDownload) != 0)
    {
        if (auto b = map.value_if<bool>(TR_KEY_sequential_download); b)
        {
            tor->set_sequential_download(*b);
            fields_loaded |= tr_resume::SequentialDownload;
        }
    }

    if ((fields_to_load & tr_resume::SequentialDownloadFromPiece) != 0)
    {
        if (auto i = map.value_if<int64_t>(TR_KEY_sequential_download_from_piece); i)
        {
            tor->set_sequential_download_from_piece(*i);
            fields_loaded |= tr_resume::SequentialDownloadFromPiece;
        }
    }

    if ((fields_to_load & tr_resume::Peers) != 0)
    {
        fields_loaded |= load_peers(map, tor);
    }

    // Note: load_filenames() must come before load_progress()
    // so that load_progress() -> helper.load_checked_pieces() -> tor_.find_file()
    // will know where to look
    if ((fields_to_load & tr_resume::Filenames) != 0)
    {
        fields_loaded |= load_filenames(map, tor);
    }

    // Note: load_progress() should come before load_file_priorities()
    // so that we can skip loading priorities iff the torrent is a
    // seed or a partial seed.
    if ((fields_to_load & tr_resume::Progress) != 0)
    {
        fields_loaded |= load_progress(map, tor, helper);
    }

    if (!tor->is_done() && (fields_to_load & tr_resume::FilePriorities) != 0)
    {
        fields_loaded |= load_file_priorities(map, tor);
    }

    if ((fields_to_load & tr_resume::Dnd) != 0)
    {
        fields_loaded |= load_dnd(map, tor);
    }

    if ((fields_to_load & tr_resume::Speedlimit) != 0)
    {
        fields_loaded |= load_speed_limits(map, tor);
    }

    if ((fields_to_load & tr_resume::Ratiolimit) != 0)
    {
        fields_loaded |= load_ratio_limits(map, tor);
    }

    if ((fields_to_load & tr_resume::Idlelimit) != 0)
    {
        fields_loaded |= load_idle_limits(map, tor);
    }

    if ((fields_to_load & tr_resume::Name) != 0)
    {
        fields_loaded |= load_name(map, tor);
    }

    if ((fields_to_load & tr_resume::Labels) != 0)
    {
        fields_loaded |= load_labels(map, tor);
    }

    if ((fields_to_load & tr_resume::Group) != 0)
    {
        fields_loaded |= load_group(map, tor);
    }

    return fields_loaded;
}
// Apply settings that came from the constructor (rather than from the
// resume file) to `tor` / `helper`, honoring the given ctor mode.
// Only fields present in `fields` are considered; returns the subset
// for which the ctor actually supplied a value.
auto set_from_ctor(
    tr_torrent* tor,
    tr_torrent::ResumeHelper& helper,
    tr_resume::fields_t const fields,
    tr_ctor const& ctor,
    tr_ctorMode const mode)
{
    auto loaded = tr_resume::fields_t{};

    if ((fields & tr_resume::Run) != 0)
    {
        auto const paused = ctor.paused(mode);
        if (paused)
        {
            // ctor stores "paused"; the helper wants its inverse
            helper.load_start_when_stable(!*paused);
            loaded |= tr_resume::Run;
        }
    }

    if ((fields & tr_resume::MaxPeers) != 0)
    {
        auto const peer_limit = ctor.peer_limit(mode);
        if (peer_limit)
        {
            tor->set_peer_limit(*peer_limit);
            loaded |= tr_resume::MaxPeers;
        }
    }

    if ((fields & tr_resume::DownloadDir) != 0)
    {
        auto const& dir = ctor.download_dir(mode);
        if (!std::empty(dir))
        {
            helper.load_download_dir(dir);
            loaded |= tr_resume::DownloadDir;
        }
    }

    if ((fields & tr_resume::SequentialDownload) != 0)
    {
        auto const& sequential = ctor.sequential_download(mode);
        if (sequential)
        {
            tor->set_sequential_download(*sequential);
            loaded |= tr_resume::SequentialDownload;
        }
    }

    if ((fields & tr_resume::SequentialDownloadFromPiece) != 0)
    {
        auto const& from_piece = ctor.sequential_download_from_piece(mode);
        if (from_piece)
        {
            tor->set_sequential_download_from_piece(*from_piece);
            loaded |= tr_resume::SequentialDownloadFromPiece;
        }
    }

    return loaded;
}
// Apply ctor settings that take precedence over the resume file.
auto use_mandatory_fields(tr_torrent* const tor, tr_torrent::ResumeHelper& helper, tr_resume::fields_t const fields, tr_ctor const& ctor)
{
    return set_from_ctor(tor, helper, fields, ctor, TR_FORCE);
}
// Apply ctor settings used only when the resume file didn't provide them.
auto use_fallback_fields(tr_torrent* const tor, tr_torrent::ResumeHelper& helper, tr_resume::fields_t const fields, tr_ctor const& ctor)
{
    return set_from_ctor(tor, helper, fields, ctor, TR_FALLBACK);
}
} // namespace
// Load the requested resume fields into `tor` / `helper`.
//
// Sources are consulted in decreasing order of precedence — forced ctor
// values first, then the on-disk resume file, then ctor fallbacks — and
// each stage only sees the fields no earlier stage has already loaded.
// Returns the union of fields that were loaded.
fields_t load(tr_torrent* tor, tr_torrent::ResumeHelper& helper, fields_t fields_to_load, tr_ctor const& ctor)
{
    TR_ASSERT(tr_isTorrent(tor));

    auto loaded = use_mandatory_fields(tor, helper, fields_to_load, ctor);
    loaded |= load_from_file(tor, helper, fields_to_load & ~loaded);
    loaded |= use_fallback_fields(tor, helper, fields_to_load & ~loaded);
    return loaded;
}
// Serialize the torrent's current state to its .resume file as a benc dict.
// On serialization failure, the error is recorded as the torrent's local
// error; nothing is reported on success.
void save(tr_torrent* const tor, tr_torrent::ResumeHelper const& helper)
{
    if (!tr_isTorrent(tor))
    {
        return;
    }

    auto map = tr_variant::Map{ 50 }; // arbitrary "big enough" number
    auto const now = tr_time();
    map.try_emplace(TR_KEY_seeding_time_seconds, helper.seconds_seeding(now));
    map.try_emplace(TR_KEY_downloading_time_seconds, helper.seconds_downloading(now));
    map.try_emplace(TR_KEY_activity_date, helper.date_active());
    map.try_emplace(TR_KEY_added_date, helper.date_added());
    map.try_emplace(TR_KEY_corrupt, tor->bytes_corrupt_.ever());
    map.try_emplace(TR_KEY_done_date, helper.date_done());
    // unmanaged_string: non-owning view; assumes the torrent's strings
    // outlive serialization below
    map.try_emplace(TR_KEY_destination, tr_variant::unmanaged_string(tor->download_dir().sv()));
    if (!std::empty(tor->incomplete_dir()))
    {
        map.try_emplace(TR_KEY_incomplete_dir, tr_variant::unmanaged_string(tor->incomplete_dir().sv()));
    }
    map.try_emplace(TR_KEY_downloaded, tor->bytes_downloaded_.ever());
    map.try_emplace(TR_KEY_uploaded, tor->bytes_uploaded_.ever());
    map.try_emplace(TR_KEY_max_peers, tor->peer_limit());
    map.try_emplace(TR_KEY_bandwidth_priority, tor->get_priority());
    // stored inverted: resume file records "paused", not "start when stable"
    map.try_emplace(TR_KEY_paused, !helper.start_when_stable());
    map.try_emplace(TR_KEY_sequential_download, tor->is_sequential_download());
    map.try_emplace(TR_KEY_sequential_download_from_piece, tor->sequential_download_from_piece());
    save_peers(map, tor);
    // per-file and per-piece state only makes sense once metainfo is known
    if (tor->has_metainfo())
    {
        save_file_priorities(map, tor);
        save_dnd(map, tor);
        save_progress(map, helper);
    }
    save_speed_limits(map, tor);
    save_ratio_limits(map, tor);
    save_idle_limits(map, tor);
    save_filenames(map, tor);
    save_name(map, tor);
    save_labels(map, tor);
    save_group(map, tor);

    auto serde = tr_variant_serde::benc();
    if (!serde.to_file(std::move(map), tor->resume_file()))
    {
        tor->error().set_local_error(fmt::format("Unable to save resume file: {:s}", serde.error_.message()));
    }
}
} // namespace tr_resume