udp: fix/silence clippy warnings

This commit is contained in:
Joakim Frostegård 2024-01-20 10:08:53 +01:00
parent 5401eaf85f
commit 9d1bba5e92
8 changed files with 137 additions and 123 deletions

View file

@ -22,6 +22,7 @@ use common::{
}; };
use config::Config; use config::Config;
use workers::socket::ConnectionValidator; use workers::socket::ConnectionValidator;
use workers::swarm::SwarmWorker;
pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker"; pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker";
pub const APP_VERSION: &str = env!("CARGO_PKG_VERSION"); pub const APP_VERSION: &str = env!("CARGO_PKG_VERSION");
@ -79,16 +80,18 @@ pub fn run(config: Config) -> ::anyhow::Result<()> {
WorkerIndex::SwarmWorker(i), WorkerIndex::SwarmWorker(i),
); );
workers::swarm::run_swarm_worker( let mut worker = SwarmWorker {
sentinel, _sentinel: sentinel,
config, config,
state, state,
server_start_instant, server_start_instant,
request_receiver, request_receiver,
response_sender, response_sender,
statistics_sender, statistics_sender,
SwarmWorkerIndex(i), worker_index: SwarmWorkerIndex(i),
) };
worker.run();
}) })
.with_context(|| "spawn swarm worker")?; .with_context(|| "spawn swarm worker")?;
} }

View file

@ -49,6 +49,7 @@ pub struct SocketWorker {
} }
impl SocketWorker { impl SocketWorker {
#[allow(clippy::too_many_arguments)]
pub fn run( pub fn run(
_sentinel: PanicSentinel, _sentinel: PanicSentinel,
shared_state: State, shared_state: State,

View file

@ -36,6 +36,7 @@ const EXTRA_PACKET_SIZE_IPV4: usize = 8 + 18 + 20 + 8;
/// - 8 byte udp header /// - 8 byte udp header
const EXTRA_PACKET_SIZE_IPV6: usize = 8 + 18 + 40 + 8; const EXTRA_PACKET_SIZE_IPV6: usize = 8 + 18 + 40 + 8;
#[allow(clippy::too_many_arguments)]
pub fn run_socket_worker( pub fn run_socket_worker(
sentinel: PanicSentinel, sentinel: PanicSentinel,
shared_state: State, shared_state: State,

View file

@ -130,9 +130,10 @@ mod tests {
return TestResult::discard(); return TestResult::discard();
} }
let mut config = Config::default(); let config = Config {
swarm_workers: swarm_workers as usize,
config.swarm_workers = swarm_workers as usize; ..Default::default()
};
let valid_until = ValidUntil::new(ServerStartInstant::new(), 1); let valid_until = ValidUntil::new(ServerStartInstant::new(), 1);

View file

@ -96,6 +96,7 @@ pub struct SocketWorker {
} }
impl SocketWorker { impl SocketWorker {
#[allow(clippy::too_many_arguments)]
pub fn run( pub fn run(
_sentinel: PanicSentinel, _sentinel: PanicSentinel,
shared_state: State, shared_state: State,
@ -136,7 +137,7 @@ impl SocketWorker {
.build() .build()
.unwrap(); .unwrap();
let recv_sqe = recv_helper.create_entry(buf_ring.bgid().try_into().unwrap()); let recv_sqe = recv_helper.create_entry(buf_ring.bgid());
// This timeout enables regular updates of pending_scrape_valid_until // This timeout enables regular updates of pending_scrape_valid_until
// and wakes the main loop to send any pending responses in the case // and wakes the main loop to send any pending responses in the case
@ -209,7 +210,7 @@ impl SocketWorker {
// Enqueue local responses // Enqueue local responses
for _ in 0..sq_space { for _ in 0..sq_space {
if let Some((response, addr)) = self.local_responses.pop_front() { if let Some((response, addr)) = self.local_responses.pop_front() {
match self.send_buffers.prepare_entry(response.into(), addr) { match self.send_buffers.prepare_entry(response, addr) {
Ok(entry) => { Ok(entry) => {
unsafe { ring.submission().push(&entry).unwrap() }; unsafe { ring.submission().push(&entry).unwrap() };
@ -471,11 +472,11 @@ impl SocketWorker {
let worker_index = let worker_index =
SwarmWorkerIndex::from_info_hash(&self.config, request.info_hash); SwarmWorkerIndex::from_info_hash(&self.config, request.info_hash);
if let Err(_) = self.request_sender.try_send_to( if self
worker_index, .request_sender
ConnectedRequest::Announce(request), .try_send_to(worker_index, ConnectedRequest::Announce(request), src)
src, .is_err()
) { {
::log::warn!("request sender full, dropping request"); ::log::warn!("request sender full, dropping request");
} }
} else { } else {
@ -500,11 +501,11 @@ impl SocketWorker {
); );
for (swarm_worker_index, request) in split_requests { for (swarm_worker_index, request) in split_requests {
if let Err(_) = self.request_sender.try_send_to( if self
swarm_worker_index, .request_sender
ConnectedRequest::Scrape(request), .try_send_to(swarm_worker_index, ConnectedRequest::Scrape(request), src)
src, .is_err()
) { {
::log::warn!("request sender full, dropping request"); ::log::warn!("request sender full, dropping request");
} }
} }

View file

@ -11,6 +11,7 @@ use crate::config::Config;
use super::{SOCKET_IDENTIFIER, USER_DATA_RECV}; use super::{SOCKET_IDENTIFIER, USER_DATA_RECV};
#[allow(clippy::enum_variant_names)]
pub enum Error { pub enum Error {
RecvMsgParseError, RecvMsgParseError,
RecvMsgTruncated, RecvMsgTruncated,

View file

@ -58,7 +58,7 @@ impl SendBuffers {
self.likely_next_free_index = 0; self.likely_next_free_index = 0;
} }
pub fn prepare_entry<'a>( pub fn prepare_entry(
&mut self, &mut self,
response: Response, response: Response,
addr: CanonicalSocketAddr, addr: CanonicalSocketAddr,

View file

@ -17,24 +17,28 @@ use crate::config::Config;
use storage::TorrentMaps; use storage::TorrentMaps;
pub fn run_swarm_worker( pub struct SwarmWorker {
_sentinel: PanicSentinel, pub _sentinel: PanicSentinel,
config: Config, pub config: Config,
state: State, pub state: State,
server_start_instant: ServerStartInstant, pub server_start_instant: ServerStartInstant,
request_receiver: Receiver<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>, pub request_receiver: Receiver<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>,
mut response_sender: ConnectedResponseSender, pub response_sender: ConnectedResponseSender,
statistics_sender: Sender<StatisticsMessage>, pub statistics_sender: Sender<StatisticsMessage>,
worker_index: SwarmWorkerIndex, pub worker_index: SwarmWorkerIndex,
) { }
impl SwarmWorker {
pub fn run(&mut self) {
let mut torrents = TorrentMaps::default(); let mut torrents = TorrentMaps::default();
let mut rng = SmallRng::from_entropy(); let mut rng = SmallRng::from_entropy();
let timeout = Duration::from_millis(config.request_channel_recv_timeout_ms); let timeout = Duration::from_millis(self.config.request_channel_recv_timeout_ms);
let mut peer_valid_until = ValidUntil::new(server_start_instant, config.cleaning.max_peer_age); let mut peer_valid_until =
ValidUntil::new(self.server_start_instant, self.config.cleaning.max_peer_age);
let cleaning_interval = Duration::from_secs(config.cleaning.torrent_cleaning_interval); let cleaning_interval = Duration::from_secs(self.config.cleaning.torrent_cleaning_interval);
let statistics_update_interval = Duration::from_secs(config.statistics.interval); let statistics_update_interval = Duration::from_secs(self.config.statistics.interval);
let mut last_cleaning = Instant::now(); let mut last_cleaning = Instant::now();
let mut last_statistics_update = Instant::now(); let mut last_statistics_update = Instant::now();
@ -42,7 +46,7 @@ pub fn run_swarm_worker(
let mut iter_counter = 0usize; let mut iter_counter = 0usize;
loop { loop {
if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) { if let Ok((sender_index, request, src)) = self.request_receiver.recv_timeout(timeout) {
// It is OK to block here as long as we don't also do blocking // It is OK to block here as long as we don't also do blocking
// sends in socket workers (doing both could cause a deadlock) // sends in socket workers (doing both could cause a deadlock)
match (request, src.get().ip()) { match (request, src.get().ip()) {
@ -53,8 +57,8 @@ pub fn run_swarm_worker(
.entry(request.info_hash) .entry(request.info_hash)
.or_default() .or_default()
.announce( .announce(
&config, &self.config,
&statistics_sender, &self.statistics_sender,
&mut rng, &mut rng,
&request, &request,
ip.into(), ip.into(),
@ -62,7 +66,7 @@ pub fn run_swarm_worker(
); );
// It doesn't matter which socket worker receives announce responses // It doesn't matter which socket worker receives announce responses
response_sender self.response_sender
.send_to_any(src, ConnectedResponse::AnnounceIpv4(response)) .send_to_any(src, ConnectedResponse::AnnounceIpv4(response))
.expect("swarm response channel is closed"); .expect("swarm response channel is closed");
} }
@ -73,8 +77,8 @@ pub fn run_swarm_worker(
.entry(request.info_hash) .entry(request.info_hash)
.or_default() .or_default()
.announce( .announce(
&config, &self.config,
&statistics_sender, &self.statistics_sender,
&mut rng, &mut rng,
&request, &request,
ip.into(), ip.into(),
@ -82,21 +86,21 @@ pub fn run_swarm_worker(
); );
// It doesn't matter which socket worker receives announce responses // It doesn't matter which socket worker receives announce responses
response_sender self.response_sender
.send_to_any(src, ConnectedResponse::AnnounceIpv6(response)) .send_to_any(src, ConnectedResponse::AnnounceIpv6(response))
.expect("swarm response channel is closed"); .expect("swarm response channel is closed");
} }
(ConnectedRequest::Scrape(request), IpAddr::V4(_)) => { (ConnectedRequest::Scrape(request), IpAddr::V4(_)) => {
let response = torrents.ipv4.scrape(request); let response = torrents.ipv4.scrape(request);
response_sender self.response_sender
.send_to(sender_index, src, ConnectedResponse::Scrape(response)) .send_to(sender_index, src, ConnectedResponse::Scrape(response))
.expect("swarm response channel is closed"); .expect("swarm response channel is closed");
} }
(ConnectedRequest::Scrape(request), IpAddr::V6(_)) => { (ConnectedRequest::Scrape(request), IpAddr::V6(_)) => {
let response = torrents.ipv6.scrape(request); let response = torrents.ipv6.scrape(request);
response_sender self.response_sender
.send_to(sender_index, src, ConnectedResponse::Scrape(response)) .send_to(sender_index, src, ConnectedResponse::Scrape(response))
.expect("swarm response channel is closed"); .expect("swarm response channel is closed");
} }
@ -107,26 +111,27 @@ pub fn run_swarm_worker(
if iter_counter % 128 == 0 { if iter_counter % 128 == 0 {
let now = Instant::now(); let now = Instant::now();
peer_valid_until = ValidUntil::new(server_start_instant, config.cleaning.max_peer_age); peer_valid_until =
ValidUntil::new(self.server_start_instant, self.config.cleaning.max_peer_age);
if now > last_cleaning + cleaning_interval { if now > last_cleaning + cleaning_interval {
torrents.clean_and_update_statistics( torrents.clean_and_update_statistics(
&config, &self.config,
&state, &self.state,
&statistics_sender, &self.statistics_sender,
&state.access_list, &self.state.access_list,
server_start_instant, self.server_start_instant,
worker_index, self.worker_index,
); );
last_cleaning = now; last_cleaning = now;
} }
if config.statistics.active() if self.config.statistics.active()
&& now > last_statistics_update + statistics_update_interval && now > last_statistics_update + statistics_update_interval
{ {
state.statistics_ipv4.torrents[worker_index.0] self.state.statistics_ipv4.torrents[self.worker_index.0]
.store(torrents.ipv4.num_torrents(), Ordering::Release); .store(torrents.ipv4.num_torrents(), Ordering::Release);
state.statistics_ipv6.torrents[worker_index.0] self.state.statistics_ipv6.torrents[self.worker_index.0]
.store(torrents.ipv6.num_torrents(), Ordering::Release); .store(torrents.ipv6.num_torrents(), Ordering::Release);
last_statistics_update = now; last_statistics_update = now;
@ -136,3 +141,4 @@ pub fn run_swarm_worker(
iter_counter = iter_counter.wrapping_add(1); iter_counter = iter_counter.wrapping_add(1);
} }
} }
}