perf: even faster wishlist (#7744)

* chore: add comments to annotate observer dependencies

* refactor: overhaul wishlist

Avoid expensive `count_active_requests` and `has_active_request_to_peer`.
- Remove "endgame"
- Never rebuild candidate list mid-download

* chore: code cleanup

* test: fix

* perf: sort block list in reverse order

* refactor: reduce request timeout from 90 to 15 seconds
Author: Yat Ho
Date: 2025-11-03 01:55:38 +08:00
Committed by: GitHub
Parent: 50e0ffb910
Commit: 2a3a8ea364
5 changed files with 327 additions and 533 deletions
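The heart of the overhaul: each candidate piece used to carry `n_reqs`, a block-to-request-count map populated by the per-peer `count_active_requests()` scan; it now carries only a descending-ordered set of not-yet-requested blocks. A minimal sketch of the new bookkeeping, with `std::set` standing in for the vector-backed `small::set` and hypothetical method names:

```cpp
#include <cstdint>
#include <functional>
#include <set>

using tr_block_index_t = uint32_t;

// Per-piece bookkeeping after the overhaul (sketch). Instead of counting
// active requests per block (the old n_reqs map), we only remember which
// blocks have never been handed out; std::greater<> keeps the smallest
// index at the back of the flat set in the real code.
struct CandidateSketch
{
    std::set<tr_block_index_t, std::greater<>> unrequested;

    // sent_request observer fired: the block is in flight, stop offering it
    void requested(tr_block_index_t block) { unrequested.erase(block); }

    // got_choke / got_reject / sent_cancel: make it requestable again
    void reset(tr_block_index_t block) { unrequested.insert(block); }
};
```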

View File: libtransmission/peer-mgr-wishlist.cc

@@ -9,7 +9,7 @@
#include <utility>
#include <vector>
#include <small/map.hpp>
#include <small/set.hpp>
#include <small/vector.hpp>
#define LIBTRANSMISSION_PEER_MODULE
@@ -21,11 +21,6 @@
#include "libtransmission/tr-macros.h"
#include "libtransmission/peer-mgr-wishlist.h"
// Asserts in this file are expensive, so hide them in #ifdef
#ifdef TR_WISHLIST_ASSERT
#include "libtransmission/tr-assert.h"
#endif
namespace
{
[[nodiscard]] std::vector<tr_block_span_t> make_spans(small::vector<tr_block_index_t> const& blocks)
@@ -44,12 +39,8 @@ namespace
return lhs + 1U != rhs;
};
auto span_end = std::adjacent_find(span_begin, end, NotAdjacent);
if (span_end == end)
{
--span_end;
}
spans.push_back({ *span_begin, *span_end + 1 });
auto const span_end = std::min(std::adjacent_find(span_begin, end, NotAdjacent), std::prev(end));
spans.push_back({ *span_begin, *span_end + 1U });
span_begin = std::next(span_end);
}
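`make_spans()` collapses a sorted, duplicate-free block list into half-open `[begin, end)` spans. A self-contained sketch of the tightened loop above, with std-only types standing in for libtransmission's:

```cpp
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

struct span { uint32_t begin, end; }; // half-open [begin, end)

// Same algorithm as make_spans(): std::adjacent_find locates the last
// block of the current run; when the run reaches the end of the list,
// adjacent_find returns end, so std::min clamps to the last element.
std::vector<span> make_spans_sketch(std::vector<uint32_t> const& blocks)
{
    auto constexpr NotAdjacent = [](uint32_t lhs, uint32_t rhs) { return lhs + 1U != rhs; };
    auto spans = std::vector<span>{};
    auto const end = std::end(blocks);
    for (auto span_begin = std::begin(blocks); span_begin != end;)
    {
        auto const span_end = std::min(std::adjacent_find(span_begin, end, NotAdjacent), std::prev(end));
        spans.push_back({ *span_begin, *span_end + 1U });
        span_begin = std::next(span_end);
    }
    return spans;
}
// e.g. { 1, 2, 3, 7, 8 } -> { 1, 4 }, { 7, 9 }
```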
@@ -68,14 +59,13 @@ class Wishlist::Impl
, replication{ mediator->count_piece_replication(piece_in) }
, priority{ mediator->priority(piece_in) }
, salt{ salt_in }
, mediator_{ mediator }
{
n_reqs.reserve(block_span.end - block_span.begin);
for (auto [block, end] = block_span; block < end; ++block)
unrequested.reserve(block_span.end - block_span.begin);
for (auto [begin, i] = block_span; i > begin; --i)
{
if (!mediator_->client_has_block(block))
if (auto const block = i - 1U; !mediator->client_has_block(block))
{
n_reqs.try_emplace(block, mediator_->count_active_requests(block));
unrequested.insert(block);
}
}
}
@@ -87,10 +77,17 @@ class Wishlist::Impl
return compare(that) < 0;
}
[[nodiscard]] constexpr auto block_belongs(tr_block_index_t const block) const
{
return block_span.begin <= block && block < block_span.end;
}
tr_piece_index_t piece;
tr_block_span_t block_span;
small::map<tr_block_index_t, uint8_t> n_reqs;
// This is sorted in reverse order so that smaller block indices
// can be taken from the end of the list, avoiding a move operation.
small::set<tr_block_index_t, small::default_inline_storage_v<tr_block_index_t>, std::greater<>> unrequested;
// Caching the following 2 values is highly beneficial, because:
// - they are often used (mainly because resort_piece() is called
@@ -102,9 +99,6 @@ class Wishlist::Impl
tr_priority_t priority;
tr_piece_index_t salt;
private:
Mediator const* mediator_;
};
using CandidateVec = std::vector<Candidate>;
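The reverse ordering is the trick worth pausing on: `small::set` is a flat, vector-backed set, so with `std::greater<>` the smallest block index sits at the back and can be consumed without shifting the remaining elements. A sketch of the same idea on a plain descending-sorted vector (illustrative names, not the `small` API):

```cpp
#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

// Flat set kept in descending order, like small::set with std::greater<>.
// Taking the smallest element is pop_back(): O(1), no element moves.
class ReverseFlatSet
{
public:
    void insert(uint32_t v)
    {
        // first position whose value is <= v keeps the order descending
        auto const it = std::lower_bound(vals_.begin(), vals_.end(), v, std::greater<>{});
        if (it == vals_.end() || *it != v)
        {
            vals_.insert(it, v);
        }
    }

    bool empty() const { return vals_.empty(); }

    uint32_t take_smallest() // what "taken from the end" means above
    {
        auto const v = vals_.back();
        vals_.pop_back();
        return v;
    }

private:
    std::vector<uint32_t> vals_; // descending
};
```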
@@ -114,35 +108,16 @@ public:
[[nodiscard]] std::vector<tr_block_span_t> next(
size_t n_wanted_blocks,
std::function<bool(tr_piece_index_t)> const& peer_has_piece,
std::function<bool(tr_block_index_t)> const& has_active_request_to_peer);
std::function<bool(tr_piece_index_t)> const& peer_has_piece);
private:
constexpr void set_candidates_dirty() noexcept
{
candidates_dirty_ = true;
}
// ---
TR_CONSTEXPR20 void dec_replication() noexcept
{
if (!candidates_dirty_)
{
std::for_each(
std::begin(candidates_),
std::end(candidates_),
[](Candidate& candidate) { --candidate.replication; });
}
std::for_each(std::begin(candidates_), std::end(candidates_), [](Candidate& candidate) { --candidate.replication; });
}
TR_CONSTEXPR20 void dec_replication_bitfield(tr_bitfield const& bitfield)
{
if (candidates_dirty_)
{
return;
}
if (bitfield.has_none())
{
return;
@@ -167,22 +142,11 @@ private:
TR_CONSTEXPR20 void inc_replication() noexcept
{
if (!candidates_dirty_)
{
std::for_each(
std::begin(candidates_),
std::end(candidates_),
[](Candidate& candidate) { ++candidate.replication; });
}
std::for_each(std::begin(candidates_), std::end(candidates_), [](Candidate& candidate) { ++candidate.replication; });
}
void inc_replication_bitfield(tr_bitfield const& bitfield)
{
if (candidates_dirty_)
{
return;
}
if (bitfield.has_none())
{
return;
@@ -205,13 +169,8 @@ private:
std::sort(std::begin(candidates_), std::end(candidates_));
}
TR_CONSTEXPR20 void inc_replication_piece(tr_piece_index_t piece)
TR_CONSTEXPR20 void inc_replication_piece(tr_piece_index_t const piece)
{
if (candidates_dirty_)
{
return;
}
if (auto iter = find_by_piece(piece); iter != std::end(candidates_))
{
++iter->replication;
@@ -221,89 +180,56 @@ private:
// ---
TR_CONSTEXPR20 void inc_active_request_span(tr_block_span_t block_span)
TR_CONSTEXPR20 void requested_block_span(tr_block_span_t const block_span)
{
if (candidates_dirty_)
{
return;
}
for (auto block = block_span.begin; block < block_span.end;)
{
auto it_p = find_by_block(block);
if (it_p == std::end(candidates_))
{
set_candidates_dirty();
// std::unreachable();
break;
}
auto& n_reqs = it_p->n_reqs;
auto& unreq = it_p->unrequested;
auto it_b_begin = std::begin(n_reqs);
it_b_begin = it_b_begin->first >= block_span.begin ? it_b_begin : n_reqs.lower_bound(block_span.begin);
auto it_b_end = std::end(unreq);
it_b_end = *std::prev(it_b_end) >= block_span.begin ? it_b_end : unreq.upper_bound(block_span.begin);
auto it_b_end = std::end(n_reqs);
it_b_end = std::prev(it_b_end)->first < block_span.end ? it_b_end : n_reqs.lower_bound(block_span.end);
auto it_b_begin = std::begin(unreq);
it_b_begin = *it_b_begin < block_span.end ? it_b_begin : unreq.upper_bound(block_span.end);
for (auto it_b = it_b_begin; it_b != it_b_end; ++it_b)
{
++it_b->second;
}
unreq.erase(it_b_begin, it_b_end);
block = it_p->block_span.end;
resort_piece(it_p);
}
}
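The iterator clamping above is easy to misread because the comparator is inverted: on a `std::greater<>` set, `upper_bound(x)` returns the first element strictly less than `x`, so `[upper_bound(end), upper_bound(begin))` covers exactly the members of the span `[begin, end)`. A self-contained check of that identity:

```cpp
#include <cassert>
#include <cstdint>
#include <functional>
#include <set>

int main()
{
    // Descending-ordered set, as in Candidate::unrequested.
    auto const unreq = std::set<uint32_t, std::greater<>>{ 9, 8, 7, 5, 3, 2 };

    // Intersect with the half-open span [3, 8).
    auto const first = unreq.upper_bound(8); // first element < 8 -> 7
    auto const last = unreq.upper_bound(3);  // first element < 3 -> 2

    auto got = std::set<uint32_t>{};
    for (auto it = first; it != last; ++it)
    {
        got.insert(*it);
    }
    assert((got == std::set<uint32_t>{ 3, 5, 7 })); // members of [3, 8)
    return 0;
}
```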
TR_CONSTEXPR20 void dec_active_request_block(tr_block_index_t block)
TR_CONSTEXPR20 void reset_block(tr_block_index_t block)
{
if (candidates_dirty_)
{
return;
}
if (auto it_p = find_by_block(block); it_p != std::end(candidates_))
{
auto& n_reqs = it_p->n_reqs;
if (auto it_b = n_reqs.find(block); it_b != std::end(n_reqs) && it_b->second > 0U)
{
--it_b->second;
}
it_p->unrequested.insert(block);
resort_piece(it_p);
}
}
TR_CONSTEXPR20 void dec_active_request_bitfield(tr_bitfield const& requests)
TR_CONSTEXPR20 void reset_blocks_bitfield(tr_bitfield const& requests)
{
if (candidates_dirty_)
{
return;
}
for (auto& candidate : candidates_)
{
for (auto& [block, n_req] : candidate.n_reqs)
for (auto [begin, i] = candidate.block_span; i > begin; --i)
{
if (n_req > 0U && requests.test(block))
if (auto const block = i - 1U; requests.test(block))
{
--n_req;
candidate.unrequested.insert(block);
}
}
}
}
// ---
TR_CONSTEXPR20 void client_got_block(tr_block_index_t block)
{
if (candidates_dirty_)
{
return;
}
if (auto iter = find_by_block(block); iter != std::end(candidates_))
{
iter->n_reqs.erase(block);
resort_piece(iter);
}
std::sort(std::begin(candidates_), std::end(candidates_));
}
// ---
@@ -311,7 +237,21 @@ private:
TR_CONSTEXPR20 void peer_disconnect(tr_bitfield const& have, tr_bitfield const& requests)
{
dec_replication_bitfield(have);
dec_active_request_bitfield(requests);
reset_blocks_bitfield(requests);
}
// ---
TR_CONSTEXPR20 void got_bad_piece(tr_piece_index_t const piece)
{
if (auto iter = find_by_piece(piece); iter != std::end(candidates_))
{
for (auto [begin, i] = iter->block_span; i > begin; --i)
{
iter->unrequested.insert(i - 1U);
}
resort_piece(iter);
}
}
// ---
@@ -329,7 +269,7 @@ private:
return std::find_if(
std::begin(candidates_),
std::end(candidates_),
[block](auto const& c) { return c.block_span.begin <= block && block < c.block_span.end; });
[block](auto const& c) { return c.block_belongs(block); });
}
static constexpr tr_piece_index_t get_salt(
@@ -357,41 +297,10 @@ private:
return piece + 1U;
}
void maybe_rebuild_candidate_list()
{
if (!candidates_dirty_)
{
return;
}
candidates_dirty_ = false;
candidates_.clear();
auto salter = tr_salt_shaker<tr_piece_index_t>{};
auto const is_sequential = mediator_.is_sequential_download();
auto const n_pieces = mediator_.piece_count();
candidates_.reserve(n_pieces);
for (tr_piece_index_t piece = 0U; piece < n_pieces; ++piece)
{
if (mediator_.client_has_piece(piece) || !mediator_.client_wants_piece(piece))
{
continue;
}
auto const salt = get_salt(piece, n_pieces, salter(), is_sequential);
candidates_.emplace_back(piece, salt, &mediator_);
}
std::sort(std::begin(candidates_), std::end(candidates_));
}
// ---
void recalculate_wanted_pieces()
{
if (candidates_dirty_)
{
return;
}
auto n_old_c = std::size(candidates_);
auto salter = tr_salt_shaker<tr_piece_index_t>{};
auto const is_sequential = mediator_.is_sequential_download();
@@ -431,24 +340,43 @@ private:
TR_CONSTEXPR20 void remove_piece(tr_piece_index_t const piece)
{
if (candidates_dirty_)
{
return;
}
if (auto iter = find_by_piece(piece); iter != std::end(candidates_))
{
candidates_.erase(iter);
}
}
TR_CONSTEXPR20 void resort_piece(CandidateVec::iterator const& pos_old)
// ---
void recalculate_salt()
{
if (candidates_dirty_)
auto salter = tr_salt_shaker<tr_piece_index_t>{};
auto const is_sequential = mediator_.is_sequential_download();
auto const n_pieces = mediator_.piece_count();
for (auto& candidate : candidates_)
{
return;
candidate.salt = get_salt(candidate.piece, n_pieces, salter(), is_sequential);
}
std::sort(std::begin(candidates_), std::end(candidates_));
}
// ---
void recalculate_priority()
{
for (auto& candidate : candidates_)
{
candidate.priority = mediator_.priority(candidate.piece);
}
std::sort(std::begin(candidates_), std::end(candidates_));
}
// ---
TR_CONSTEXPR20 void resort_piece(CandidateVec::iterator const& pos_old)
{
auto const pos_begin = std::begin(candidates_);
// Candidate needs to be moved towards the front of the list
@@ -466,8 +394,6 @@ private:
}
CandidateVec candidates_;
bool candidates_dirty_ = true;
bool is_endgame_ = false;
std::array<libtransmission::ObserverTag, 14U> const tags_;
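`resort_piece()` (its body is mostly elided by the hunks above) re-slots a single candidate after its cached sort keys change. One plausible reading of the "moved towards the front" comment is the standard one-element `std::rotate` idiom; a sketch under that assumption, not the verbatim implementation:

```cpp
#include <algorithm>
#include <iterator>
#include <vector>

// Move the element at pos to its sorted position in an otherwise-sorted
// vector, shifting only the affected neighbors instead of re-sorting.
template <typename T>
void resort_one(std::vector<T>& v, typename std::vector<T>::iterator pos)
{
    if (auto const dst = std::lower_bound(v.begin(), pos, *pos); dst != pos)
    {
        std::rotate(dst, pos, std::next(pos)); // shift towards the front
    }
    else if (auto const dst2 = std::upper_bound(std::next(pos), v.end(), *pos); dst2 != std::next(pos))
    {
        std::rotate(pos, std::next(pos), dst2); // shift towards the back
    }
}
```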
@@ -476,41 +402,64 @@ private:
Wishlist::Impl::Impl(Mediator& mediator_in)
: tags_{ {
// candidates
mediator_in.observe_files_wanted_changed([this](tr_torrent*, tr_file_index_t const*, tr_file_index_t, bool)
{ recalculate_wanted_pieces(); }),
// replication, unrequested
mediator_in.observe_peer_disconnect([this](tr_torrent*, tr_bitfield const& b, tr_bitfield const& ar)
{ peer_disconnect(b, ar); }),
mediator_in.observe_got_bad_piece([this](tr_torrent*, tr_piece_index_t) { set_candidates_dirty(); }),
// unrequested
mediator_in.observe_got_bad_piece([this](tr_torrent*, tr_piece_index_t p) { got_bad_piece(p); }),
// replication
mediator_in.observe_got_bitfield([this](tr_torrent*, tr_bitfield const& b) { inc_replication_bitfield(b); }),
mediator_in.observe_got_block([this](tr_torrent*, tr_block_index_t b) { client_got_block(b); }),
mediator_in.observe_got_choke([this](tr_torrent*, tr_bitfield const& b) { dec_active_request_bitfield(b); }),
// unrequested
mediator_in.observe_got_choke([this](tr_torrent*, tr_bitfield const& b) { reset_blocks_bitfield(b); }),
// replication
mediator_in.observe_got_have([this](tr_torrent*, tr_piece_index_t p) { inc_replication_piece(p); }),
// replication
mediator_in.observe_got_have_all([this](tr_torrent*) { inc_replication(); }),
mediator_in.observe_got_reject([this](tr_torrent*, tr_peer*, tr_block_index_t b) { dec_active_request_block(b); }),
// unrequested
mediator_in.observe_got_reject([this](tr_torrent*, tr_peer*, tr_block_index_t b) { reset_block(b); }),
// candidates
mediator_in.observe_piece_completed([this](tr_torrent*, tr_piece_index_t p) { remove_piece(p); }),
// priority
mediator_in.observe_priority_changed([this](tr_torrent*, tr_file_index_t const*, tr_file_index_t, tr_priority_t)
{ set_candidates_dirty(); }),
mediator_in.observe_sent_cancel([this](tr_torrent*, tr_peer*, tr_block_index_t b) { dec_active_request_block(b); }),
mediator_in.observe_sent_request([this](tr_torrent*, tr_peer*, tr_block_span_t bs) { inc_active_request_span(bs); }),
mediator_in.observe_sequential_download_changed([this](tr_torrent*, bool) { set_candidates_dirty(); }),
{ recalculate_priority(); }),
// unrequested
mediator_in.observe_sent_cancel([this](tr_torrent*, tr_peer*, tr_block_index_t b) { reset_block(b); }),
// unrequested
mediator_in.observe_sent_request([this](tr_torrent*, tr_peer*, tr_block_span_t bs) { requested_block_span(bs); }),
// salt
mediator_in.observe_sequential_download_changed([this](tr_torrent*, bool) { recalculate_salt(); }),
} }
, mediator_{ mediator_in }
{
auto salter = tr_salt_shaker<tr_piece_index_t>{};
auto const is_sequential = mediator_.is_sequential_download();
auto const n_pieces = mediator_.piece_count();
candidates_.reserve(n_pieces);
for (tr_piece_index_t piece = 0U; piece < n_pieces; ++piece)
{
if (mediator_.client_has_piece(piece) || !mediator_.client_wants_piece(piece))
{
continue;
}
auto const salt = get_salt(piece, n_pieces, salter(), is_sequential);
candidates_.emplace_back(piece, salt, &mediator_);
}
std::sort(std::begin(candidates_), std::end(candidates_));
}
std::vector<tr_block_span_t> Wishlist::Impl::next(
size_t n_wanted_blocks,
std::function<bool(tr_piece_index_t)> const& peer_has_piece,
std::function<bool(tr_block_index_t)> const& has_active_request_to_peer)
size_t const n_wanted_blocks,
std::function<bool(tr_piece_index_t)> const& peer_has_piece)
{
if (n_wanted_blocks == 0U)
{
return {};
}
maybe_rebuild_candidate_list();
auto const max_peers = is_endgame_ ? EndgameMaxPeers : NormalMaxPeers;
auto blocks = small::vector<tr_block_index_t>{};
blocks.reserve(n_wanted_blocks);
for (auto const& candidate : candidates_)
@@ -527,49 +476,28 @@ std::vector<tr_block_span_t> Wishlist::Impl::next(
continue;
}
// walk the blocks in this piece that we don't have
for (auto const& [block, n_req] : candidate.n_reqs)
// walk the blocks in this piece that we don't have and haven't requested
for (auto it = std::rbegin(candidate.unrequested), end = std::rend(candidate.unrequested); it != end; ++it)
{
if (std::size(blocks) >= n_wanted_blocks)
{
break;
}
#ifdef TR_WISHLIST_ASSERT
auto const n_req_truth = mediator_.count_active_requests(block);
TR_ASSERT_MSG(
n_req == n_req_truth,
fmt::format("piece = {}, block = {}, n_req = {}, truth = {}", candidate.piece, block, n_req, n_req_truth));
#endif
// don't request from too many peers
if (n_req >= max_peers)
{
continue;
}
// don't request block from peers which we already requested from
if (has_active_request_to_peer(block))
{
continue;
}
blocks.emplace_back(block);
blocks.emplace_back(*it);
}
}
is_endgame_ = std::size(blocks) < n_wanted_blocks;
// Ensure the list of blocks is sorted
// The list needs to be unique as well, but that should come naturally
std::sort(std::begin(blocks), std::end(blocks));
return make_spans(blocks);
}
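Note the `rbegin`/`rend` walk in `next()`: reverse-iterating the descending `unrequested` set yields each piece's blocks in ascending order, so the final `std::sort` only has to merge the per-piece runs. Illustration:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <set>

int main()
{
    auto const unrequested = std::set<uint32_t, std::greater<>>{ 5, 1, 9 };

    // Forward iteration is descending: 9 5 1.
    // Reverse iteration recovers ascending order: 1 5 9.
    for (auto it = unrequested.rbegin(); it != unrequested.rend(); ++it)
    {
        std::cout << *it << ' ';
    }
    std::cout << '\n';
    return 0;
}
```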
int Wishlist::Impl::Candidate::compare(Wishlist::Impl::Candidate const& that) const noexcept
int Wishlist::Impl::Candidate::compare(Candidate const& that) const noexcept
{
// prefer pieces closer to completion
if (auto const val = tr_compare_3way(std::size(n_reqs), std::size(that.n_reqs)); val != 0)
if (auto const val = tr_compare_3way(std::size(unrequested), std::size(that.unrequested)); val != 0)
{
return val;
}
@@ -599,9 +527,8 @@ Wishlist::Wishlist(Mediator& mediator_in)
Wishlist::~Wishlist() = default;
std::vector<tr_block_span_t> Wishlist::next(
size_t n_wanted_blocks,
std::function<bool(tr_piece_index_t)> const& peer_has_piece,
std::function<bool(tr_block_index_t)> const& has_active_pending_to_peer)
size_t const n_wanted_blocks,
std::function<bool(tr_piece_index_t)> const& peer_has_piece)
{
return impl_->next(n_wanted_blocks, peer_has_piece, has_active_pending_to_peer);
return impl_->next(n_wanted_blocks, peer_has_piece);
}

View File: libtransmission/peer-mgr-wishlist.h

@@ -28,16 +28,12 @@ struct tr_peer;
class Wishlist
{
public:
static auto constexpr EndgameMaxPeers = size_t{ 2U };
static auto constexpr NormalMaxPeers = size_t{ 1U };
struct Mediator
{
[[nodiscard]] virtual bool client_has_block(tr_block_index_t block) const = 0;
[[nodiscard]] virtual bool client_has_piece(tr_piece_index_t piece) const = 0;
[[nodiscard]] virtual bool client_wants_piece(tr_piece_index_t piece) const = 0;
[[nodiscard]] virtual bool is_sequential_download() const = 0;
[[nodiscard]] virtual uint8_t count_active_requests(tr_block_index_t block) const = 0;
[[nodiscard]] virtual size_t count_piece_replication(tr_piece_index_t piece) const = 0;
[[nodiscard]] virtual tr_block_span_t block_span(tr_piece_index_t piece) const = 0;
[[nodiscard]] virtual tr_piece_index_t piece_count() const = 0;
@@ -51,8 +47,6 @@ public:
libtransmission::SimpleObservable<tr_torrent*, tr_piece_index_t>::Observer observer) = 0;
[[nodiscard]] virtual libtransmission::ObserverTag observe_got_bitfield(
libtransmission::SimpleObservable<tr_torrent*, tr_bitfield const&>::Observer observer) = 0;
[[nodiscard]] virtual libtransmission::ObserverTag observe_got_block(
libtransmission::SimpleObservable<tr_torrent*, tr_block_index_t>::Observer observer) = 0;
[[nodiscard]] virtual libtransmission::ObserverTag observe_got_choke(
libtransmission::SimpleObservable<tr_torrent*, tr_bitfield const&>::Observer observer) = 0;
[[nodiscard]] virtual libtransmission::ObserverTag observe_got_have(
@@ -82,8 +76,7 @@ public:
// the next blocks that we should request from a peer
[[nodiscard]] std::vector<tr_block_span_t> next(
size_t n_wanted_blocks,
std::function<bool(tr_piece_index_t)> const& peer_has_piece,
std::function<bool(tr_block_index_t)> const& has_active_pending_to_peer);
std::function<bool(tr_piece_index_t)> const& peer_has_piece);
private:
class Impl;

View File: libtransmission/peer-mgr.cc

@@ -384,7 +384,6 @@ public:
[[nodiscard]] bool client_has_piece(tr_piece_index_t piece) const override;
[[nodiscard]] bool client_wants_piece(tr_piece_index_t piece) const override;
[[nodiscard]] bool is_sequential_download() const override;
[[nodiscard]] uint8_t count_active_requests(tr_block_index_t block) const override;
[[nodiscard]] size_t count_piece_replication(tr_piece_index_t piece) const override;
[[nodiscard]] tr_block_span_t block_span(tr_piece_index_t piece) const override;
[[nodiscard]] tr_piece_index_t piece_count() const override;
@@ -399,8 +398,6 @@ public:
libtransmission::SimpleObservable<tr_torrent*, tr_piece_index_t>::Observer observer) override;
[[nodiscard]] libtransmission::ObserverTag observe_got_bitfield(
libtransmission::SimpleObservable<tr_torrent*, tr_bitfield const&>::Observer observer) override;
[[nodiscard]] libtransmission::ObserverTag observe_got_block(
libtransmission::SimpleObservable<tr_torrent*, tr_block_index_t>::Observer observer) override;
[[nodiscard]] libtransmission::ObserverTag observe_got_choke(
libtransmission::SimpleObservable<tr_torrent*, tr_bitfield const&>::Observer observer) override;
[[nodiscard]] libtransmission::ObserverTag observe_got_have(
@@ -670,7 +667,6 @@ public:
libtransmission::SimpleObservable<tr_torrent*, tr_bitfield const& /*bitfield*/, tr_bitfield const& /*active requests*/>
peer_disconnect;
libtransmission::SimpleObservable<tr_torrent*, tr_bitfield const&> got_bitfield;
libtransmission::SimpleObservable<tr_torrent*, tr_block_index_t> got_block;
libtransmission::SimpleObservable<tr_torrent*, tr_bitfield const&> got_choke;
libtransmission::SimpleObservable<tr_torrent*, tr_piece_index_t> got_have;
libtransmission::SimpleObservable<tr_torrent*> got_have_all;
@@ -910,7 +906,6 @@ private:
peer->blocks_sent_to_client.add(tr_time(), 1);
peer->blame.set(loc.piece);
tor->on_block_received(loc.block);
s->got_block.emit(tor, loc.block);
}
break;
@@ -1025,16 +1020,6 @@ bool tr_swarm::WishlistMediator::is_sequential_download() const
return tor_.is_sequential_download();
}
uint8_t tr_swarm::WishlistMediator::count_active_requests(tr_block_index_t block) const
{
auto const op = [block](uint8_t acc, auto const& peer)
{
return acc + (peer->active_requests.test(block) ? 1U : 0U);
};
return std::accumulate(std::begin(swarm_.peers), std::end(swarm_.peers), uint8_t{}, op) +
std::accumulate(std::begin(swarm_.webseeds), std::end(swarm_.webseeds), uint8_t{}, op);
}
size_t tr_swarm::WishlistMediator::count_piece_replication(tr_piece_index_t piece) const
{
auto const op = [piece](size_t acc, auto const& peer)
@@ -1094,12 +1079,6 @@ libtransmission::ObserverTag tr_swarm::WishlistMediator::observe_got_bitfield(
return swarm_.got_bitfield.observe(std::move(observer));
}
libtransmission::ObserverTag tr_swarm::WishlistMediator::observe_got_block(
libtransmission::SimpleObservable<tr_torrent*, tr_block_index_t>::Observer observer)
{
return swarm_.got_block.observe(std::move(observer));
}
libtransmission::ObserverTag tr_swarm::WishlistMediator::observe_got_choke(
libtransmission::SimpleObservable<tr_torrent*, tr_bitfield const&>::Observer observer)
{
@@ -1324,15 +1303,13 @@ void tr_peerMgrFree(tr_peerMgr* manager)
std::vector<tr_block_span_t> tr_peerMgrGetNextRequests(tr_torrent* torrent, tr_peer const* peer, size_t numwant)
{
TR_ASSERT(!torrent->is_done());
tr_swarm& swarm = *torrent->swarm;
tr_swarm const& swarm = *torrent->swarm;
TR_ASSERT(swarm.wishlist);
if (!swarm.wishlist)
{
swarm.wishlist = std::make_unique<Wishlist>(swarm.wishlist_mediator);
return {};
}
return swarm.wishlist->next(
numwant,
[peer](tr_piece_index_t p) { return peer->has_piece(p); },
[peer](tr_block_index_t b) { return peer->active_requests.test(b); });
return swarm.wishlist->next(numwant, [peer](tr_piece_index_t p) { return peer->has_piece(p); });
}
namespace
@@ -1720,6 +1697,7 @@ void tr_swarm::on_torrent_started()
auto const lock = unique_lock();
is_running = true;
manager->rechokeSoon();
wishlist = std::make_unique<Wishlist>(wishlist_mediator);
}
void tr_swarm::on_torrent_stopped()

View File: libtransmission/peer-msgs.cc

@@ -745,7 +745,7 @@ private:
static auto constexpr SendPexInterval = 90s;
// how many seconds we expect the next piece block to arrive
static auto constexpr RequestTimeoutSecs = time_t{ 90 };
static auto constexpr RequestTimeoutSecs = time_t{ 15 };
};
// ---
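Dropping the per-request timeout from 90 to 15 seconds recycles stalled requests much sooner; the cancel path then feeds the blocks back to the wishlist via `reset_block()`. A generic sketch of how such a timeout sweep might look, not the exact peer-msgs logic:

```cpp
#include <cstdint>
#include <ctime>
#include <vector>

struct PendingRequest
{
    uint32_t block;
    time_t sent_at;
};

static auto constexpr RequestTimeoutSecs = time_t{ 15 };

// Collect requests that have waited longer than the timeout so the
// caller can cancel them and hand the blocks back to the wishlist.
std::vector<uint32_t> find_timed_out(std::vector<PendingRequest> const& pending, time_t now)
{
    auto timed_out = std::vector<uint32_t>{};
    for (auto const& [block, sent_at] : pending)
    {
        if (now - sent_at >= RequestTimeoutSecs)
        {
            timed_out.push_back(block);
        }
    }
    return timed_out;
}
```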

File diff suppressed because it is too large.