fix: sonarcloud (#4453)

Charles Kerr
2022-12-23 10:56:27 -06:00
committed by GitHub
parent a1892f2c7c
commit 14a3d01e45
6 changed files with 25 additions and 38 deletions

libtransmission/announcer.cc

@@ -45,10 +45,7 @@ using namespace std::literals;
 /* unless the tracker says otherwise, rescrape this frequently */
 static auto constexpr DefaultScrapeIntervalSec = int{ 60 * 30 };
 
-/* unless the tracker says otherwise, this is the announce interval */
-static auto constexpr DefaultAnnounceIntervalSec = int{ 60 * 10 };
-
-/* unless the tracker says otherwise, this is the announce min_interval */
-static auto constexpr DefaultAnnounceMinIntervalSec = int{ 60 * 2 };
-
 /* the value of the 'numwant' argument passed in tracker requests. */
 static auto constexpr Numwant = int{ 80 };
@@ -528,6 +525,12 @@ struct tr_tier
     bool isScraping = false;
 
 private:
+    // unless the tracker says otherwise, this is the announce interval
+    static auto constexpr DefaultAnnounceIntervalSec = int{ 60 * 10 };
+
+    // unless the tracker says otherwise, this is the announce min_interval
+    static auto constexpr DefaultAnnounceMinIntervalSec = int{ 60 * 2 };
+
     [[nodiscard]] static time_t getNextScrapeTime(tr_session const* session, tr_tier const* tier, int interval)
     {
         // Maybe don't scrape paused torrents
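The two announcer.cc hunks above move the announce-interval defaults out of file scope and into the private section of tr_tier, the only type that uses them; narrowing a constant's visibility to its single consumer is the kind of scoping issue SonarCloud typically flags. A minimal sketch of the same pattern, with hypothetical names rather than Transmission code:

    #include <iostream>

    class retry_policy
    {
    public:
        [[nodiscard]] int next_delay_sec(int attempt) const
        {
            // scale the base interval with each failed attempt
            return DefaultIntervalSec * (attempt + 1);
        }

    private:
        // scoped to the one class that uses it, instead of file scope
        static auto constexpr DefaultIntervalSec = int{ 60 * 10 };
    };

    int main()
    {
        std::cout << retry_policy{}.next_delay_sec(2) << '\n'; // prints 1800
    }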

libtransmission/net.h

@@ -58,21 +58,6 @@ using tr_socket_t = int;
 #define sockerrno errno
 #endif
 
-/****
-*****
-*****  tr_address
-*****
-****/
-
-enum tr_address_type
-{
-    TR_AF_INET,
-    TR_AF_INET6,
-    NUM_TR_AF_INET_TYPES
-};
-
-struct tr_address;
-
 /**
  * Literally just a port number.
 *
@@ -149,6 +134,13 @@ private:
     uint16_t hport_ = 0;
 };
 
+enum tr_address_type
+{
+    TR_AF_INET,
+    TR_AF_INET6,
+    NUM_TR_AF_INET_TYPES
+};
+
 struct tr_address
 {
     [[nodiscard]] static std::optional<tr_address> from_string(std::string_view address_sv);
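The two net.h hunks relocate tr_address_type so it sits directly above tr_address, the one struct that uses it, which makes the old banner comment and the struct tr_address forward declaration unnecessary. A generic sketch of the ordering idea, with hypothetical types:

    // the enum lives next to its only consumer, and no forward
    // declaration is needed because the full definition appears
    // before its first use
    enum color_space
    {
        COLOR_RGB,
        COLOR_HSV,
    };

    struct pixel
    {
        color_space space = COLOR_RGB;
    };

    int main()
    {
        auto const p = pixel{};
        return p.space; // 0
    }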

libtransmission/peer-mgr.cc

@@ -903,10 +903,8 @@ static void peerSuggestedPiece(
 void tr_peerMgrPieceCompleted(tr_torrent* tor, tr_piece_index_t p)
 {
     bool piece_came_from_peers = false;
-    tr_swarm* const s = tor->swarm;
 
-    /* walk through our peers */
-    for (auto* const peer : s->peers)
+    for (auto* const peer : tor->swarm->peers)
     {
         // notify the peer that we now have this piece
         peer->on_piece_completed(p);
@@ -2648,10 +2646,7 @@ struct peer_candidate
     auto const now_msec = tr_time_msec();
 
     // leave 5% of connection slots for incoming connections -- ticket #2609
-    auto const max_candidates = static_cast<size_t>(session->peerLimit() * 0.95);
-
-    // don't start any new handshakes if we're full up
-    if (max_candidates <= tr_peerMsgs::size())
+    if (auto const max_candidates = static_cast<size_t>(session->peerLimit() * 0.95); max_candidates <= tr_peerMsgs::size())
    {
        return {};
    }
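The second peer-mgr.cc hunk folds max_candidates into a C++17 if-statement initializer, so the name is visible only in the branch that tests it. A self-contained sketch of the pattern, with hypothetical names:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main()
    {
        auto const peers = std::vector<int>(100, 0);

        // C++17 if-with-initializer: `limit` exists only inside this statement
        if (auto const limit = static_cast<std::size_t>(peers.capacity() * 0.95); std::size(peers) >= limit)
        {
            std::cout << "full up, no new handshakes\n";
        }
        // `limit` is out of scope here, so it can't be misused below
    }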

libtransmission/peer-socket.cc

@@ -18,7 +18,7 @@
 #define tr_logAddDebugIo(io, msg) tr_logAddDebug(msg, (io)->display_name())
 #define tr_logAddTraceIo(io, msg) tr_logAddTrace(msg, (io)->display_name())
 
-tr_peer_socket::tr_peer_socket(tr_session* session, tr_address const& address, tr_port port, tr_socket_t sock)
+tr_peer_socket::tr_peer_socket(tr_session const* session, tr_address const& address, tr_port port, tr_socket_t sock)
     : handle{ sock }
     , address_{ address }
     , port_{ port }
@@ -115,16 +115,13 @@ size_t tr_peer_socket::try_read(Buffer& buf, size_t max, tr_error** error) const
     }
 
 #ifdef WITH_UTP
-    if (is_utp())
-    {
-        // utp_read_drained() notifies libutp that this read buffer is
-        // empty. It opens up the congestion window by sending an ACK
-        // (soonish) if one was not going to be sent.
-        if (std::empty(buf))
-        {
-            utp_read_drained(handle.utp);
-        }
-    }
+    // utp_read_drained() notifies libutp that this read buffer is empty.
+    // It opens up the congestion window by sending an ACK (soonish) if
+    // one was not going to be sent.
+    if (is_utp() && std::empty(buf))
+    {
+        utp_read_drained(handle.utp);
+    }
 #endif
 
     return {};
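The try_read hunk collapses the nested is_utp() / std::empty(buf) checks into a single short-circuited condition; behavior is unchanged because the inner block only ran when both tests passed. A generic sketch of the de-nesting, with hypothetical functions:

    #include <iostream>

    bool outer() { return true; }
    bool inner() { return true; }

    int main()
    {
        // before: two nesting levels
        if (outer())
        {
            if (inner())
            {
                std::cout << "act\n";
            }
        }

        // after: same behavior at one level; && still skips inner()
        // whenever outer() is false
        if (outer() && inner())
        {
            std::cout << "act\n";
        }
    }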

libtransmission/peer-socket.h

@@ -29,7 +29,7 @@ public:
     using Buffer = libtransmission::Buffer;
 
     tr_peer_socket() = default;
-    tr_peer_socket(tr_session* session, tr_address const& address, tr_port port, tr_socket_t sock);
+    tr_peer_socket(tr_session const* session, tr_address const& address, tr_port port, tr_socket_t sock);
     tr_peer_socket(tr_address const& address, tr_port port, struct UTPSocket* const sock);
 
     tr_peer_socket(tr_peer_socket&&) = default;
     tr_peer_socket(tr_peer_socket const&) = delete;
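Both peer-socket hunks tighten the constructor's tr_session* parameter to tr_session const*, documenting that construction only reads from the session. A minimal sketch of the benefit, assuming a hypothetical session type:

    #include <string>

    struct session
    {
        std::string display_name;
    };

    class socket_wrapper
    {
    public:
        // `session const*`: the constructor reads the session but never
        // mutates it, so pointer-to-const callers can construct one too
        explicit socket_wrapper(session const* s)
            : name_{ s->display_name }
        {
        }

    private:
        std::string name_;
    };

    int main()
    {
        auto const s = session{ "example" };
        auto const w = socket_wrapper{ &s }; // ok: &s is session const*
    }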

libtransmission/web.cc

@@ -66,7 +66,7 @@ using shared_unique_ptr = std::unique_ptr<CURLSH, ShareDeleter>;
 
 struct MultiDeleter
 {
-    void operator()(CURLM* multi)
+    void operator()(CURLM* multi) const
     {
         if (multi == nullptr)
         {
@@ -585,7 +585,7 @@ public:
         return std::empty(queued_tasks_) && std::empty(running_tasks_);
     }
 
-    void remove_task(Task& task)
+    void remove_task(Task const& task)
     {
         auto const lock = std::unique_lock{ tasks_mutex_ };
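The two web.cc hunks add missing const: MultiDeleter's call operator doesn't modify the functor itself, and remove_task only reads the task it's given. A runnable sketch of the const-qualified deleter pattern, using a hypothetical C-style handle in place of CURLM:

    #include <memory>

    // stand-ins for a C library handle and its cleanup call (hypothetical)
    struct handle_t
    {
    };

    handle_t* handle_new() { return new handle_t; }
    void handle_free(handle_t* h) { delete h; }

    struct HandleDeleter
    {
        // const-qualified: freeing the handle mutates the handle,
        // not the deleter object itself
        void operator()(handle_t* h) const
        {
            if (h == nullptr)
            {
                return;
            }

            handle_free(h);
        }
    };

    using handle_unique_ptr = std::unique_ptr<handle_t, HandleDeleter>;

    int main()
    {
        auto const ptr = handle_unique_ptr{ handle_new() };
        // HandleDeleter::operator() runs when `ptr` leaves scope; because
        // it is const, it can also be invoked through const contexts
    }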