mirror of
https://github.com/YGGverse/aquatic.git
synced 2026-04-01 10:15:31 +00:00
udp: remove thingbuf in favor of crossbeam channel
thingbuf didn't have obvious performance advantages and is a lot less mature. Furthermore, it doesn't support anything like crossbeam's Receiver::try_iter, which is preferable now that announce responses can be sent to any socket worker.
This commit is contained in:
parent
e77c9f46e7
commit
1a6b4345d4
10 changed files with 163 additions and 329 deletions
|
|
@ -1,4 +1,3 @@
|
|||
use std::borrow::Cow;
|
||||
use std::io::{Cursor, ErrorKind};
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::{Duration, Instant};
|
||||
|
|
@ -42,7 +41,7 @@ pub struct SocketWorker {
|
|||
server_start_instant: ServerStartInstant,
|
||||
pending_scrape_responses: PendingScrapeResponseSlab,
|
||||
socket: UdpSocket,
|
||||
opt_resend_buffer: Option<Vec<(Response, CanonicalSocketAddr)>>,
|
||||
opt_resend_buffer: Option<Vec<(CanonicalSocketAddr, Response)>>,
|
||||
buffer: [u8; BUFFER_SIZE],
|
||||
polling_mode: PollMode,
|
||||
/// Storage for requests that couldn't be sent to swarm worker because channel was full
|
||||
|
|
@ -133,14 +132,14 @@ impl SocketWorker {
|
|||
|
||||
// If resend buffer is enabled, send any responses in it
|
||||
if let Some(resend_buffer) = self.opt_resend_buffer.as_mut() {
|
||||
for (response, addr) in resend_buffer.drain(..) {
|
||||
for (addr, response) in resend_buffer.drain(..) {
|
||||
Self::send_response(
|
||||
&self.config,
|
||||
&self.shared_state,
|
||||
&mut self.socket,
|
||||
&mut self.buffer,
|
||||
&mut None,
|
||||
response.into(),
|
||||
response,
|
||||
addr,
|
||||
);
|
||||
}
|
||||
|
|
@ -235,7 +234,7 @@ impl SocketWorker {
|
|||
&mut self.socket,
|
||||
&mut self.buffer,
|
||||
&mut self.opt_resend_buffer,
|
||||
CowResponse::Error(Cow::Owned(response)),
|
||||
Response::Error(response),
|
||||
src,
|
||||
);
|
||||
}
|
||||
|
|
@ -310,7 +309,7 @@ impl SocketWorker {
|
|||
&mut self.socket,
|
||||
&mut self.buffer,
|
||||
&mut self.opt_resend_buffer,
|
||||
CowResponse::Connect(Cow::Owned(response)),
|
||||
Response::Connect(response),
|
||||
src,
|
||||
);
|
||||
|
||||
|
|
@ -346,7 +345,7 @@ impl SocketWorker {
|
|||
&mut self.socket,
|
||||
&mut self.buffer,
|
||||
&mut self.opt_resend_buffer,
|
||||
CowResponse::Error(Cow::Owned(response)),
|
||||
Response::Error(response),
|
||||
src,
|
||||
);
|
||||
|
||||
|
|
@ -392,30 +391,20 @@ impl SocketWorker {
|
|||
}
|
||||
|
||||
fn handle_swarm_worker_responses(&mut self) {
|
||||
loop {
|
||||
let recv_ref = if let Ok(recv_ref) = self.response_receiver.try_recv_ref() {
|
||||
recv_ref
|
||||
} else {
|
||||
break;
|
||||
};
|
||||
|
||||
let response = match recv_ref.kind {
|
||||
ConnectedResponseKind::Scrape => {
|
||||
for (addr, response) in self.response_receiver.try_iter() {
|
||||
let response = match response {
|
||||
ConnectedResponse::Scrape(response) => {
|
||||
if let Some(r) = self
|
||||
.pending_scrape_responses
|
||||
.add_and_get_finished(&recv_ref.scrape)
|
||||
.add_and_get_finished(&response)
|
||||
{
|
||||
CowResponse::Scrape(Cow::Owned(r))
|
||||
Response::Scrape(r)
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
ConnectedResponseKind::AnnounceIpv4 => {
|
||||
CowResponse::AnnounceIpv4(Cow::Borrowed(&recv_ref.announce_ipv4))
|
||||
}
|
||||
ConnectedResponseKind::AnnounceIpv6 => {
|
||||
CowResponse::AnnounceIpv6(Cow::Borrowed(&recv_ref.announce_ipv6))
|
||||
}
|
||||
ConnectedResponse::AnnounceIpv4(r) => Response::AnnounceIpv4(r),
|
||||
ConnectedResponse::AnnounceIpv6(r) => Response::AnnounceIpv6(r),
|
||||
};
|
||||
|
||||
Self::send_response(
|
||||
|
|
@ -425,7 +414,7 @@ impl SocketWorker {
|
|||
&mut self.buffer,
|
||||
&mut self.opt_resend_buffer,
|
||||
response,
|
||||
recv_ref.addr,
|
||||
addr,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -435,8 +424,8 @@ impl SocketWorker {
|
|||
shared_state: &State,
|
||||
socket: &mut UdpSocket,
|
||||
buffer: &mut [u8],
|
||||
opt_resend_buffer: &mut Option<Vec<(Response, CanonicalSocketAddr)>>,
|
||||
response: CowResponse,
|
||||
opt_resend_buffer: &mut Option<Vec<(CanonicalSocketAddr, Response)>>,
|
||||
response: Response,
|
||||
canonical_addr: CanonicalSocketAddr,
|
||||
) {
|
||||
let mut buffer = Cursor::new(&mut buffer[..]);
|
||||
|
|
@ -478,18 +467,18 @@ impl SocketWorker {
|
|||
};
|
||||
|
||||
match response {
|
||||
CowResponse::Connect(_) => {
|
||||
Response::Connect(_) => {
|
||||
stats.responses_sent_connect.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
CowResponse::AnnounceIpv4(_) | CowResponse::AnnounceIpv6(_) => {
|
||||
Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => {
|
||||
stats
|
||||
.responses_sent_announce
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
CowResponse::Scrape(_) => {
|
||||
Response::Scrape(_) => {
|
||||
stats.responses_sent_scrape.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
CowResponse::Error(_) => {
|
||||
Response::Error(_) => {
|
||||
stats.responses_sent_error.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
|
@ -503,7 +492,7 @@ impl SocketWorker {
|
|||
if resend_buffer.len() < config.network.resend_buffer_max_len {
|
||||
::log::debug!("Adding response to resend queue, since sending it to {} failed with: {:#}", addr, err);
|
||||
|
||||
resend_buffer.push((response.into_owned(), canonical_addr));
|
||||
resend_buffer.push((canonical_addr, response));
|
||||
} else {
|
||||
::log::warn!("Response resend buffer full, dropping response");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ mod buf_ring;
|
|||
mod recv_helper;
|
||||
mod send_buffers;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
use std::net::UdpSocket;
|
||||
|
|
@ -217,8 +216,7 @@ impl SocketWorker {
|
|||
num_send_added += 1;
|
||||
}
|
||||
Err(send_buffers::Error::NoBuffers(response)) => {
|
||||
self.local_responses
|
||||
.push_front((response.into_owned(), addr));
|
||||
self.local_responses.push_front((response, addr));
|
||||
|
||||
break;
|
||||
}
|
||||
|
|
@ -233,40 +231,32 @@ impl SocketWorker {
|
|||
|
||||
// Enqueue swarm worker responses
|
||||
for _ in 0..(sq_space - num_send_added) {
|
||||
let recv_ref = if let Ok(recv_ref) = self.response_receiver.try_recv_ref() {
|
||||
recv_ref
|
||||
let (addr, response) = if let Ok(r) = self.response_receiver.try_recv() {
|
||||
r
|
||||
} else {
|
||||
break;
|
||||
};
|
||||
|
||||
let response = match recv_ref.kind {
|
||||
ConnectedResponseKind::AnnounceIpv4 => {
|
||||
CowResponse::AnnounceIpv4(Cow::Borrowed(&recv_ref.announce_ipv4))
|
||||
}
|
||||
ConnectedResponseKind::AnnounceIpv6 => {
|
||||
CowResponse::AnnounceIpv6(Cow::Borrowed(&recv_ref.announce_ipv6))
|
||||
}
|
||||
ConnectedResponseKind::Scrape => {
|
||||
if let Some(response) = self
|
||||
.pending_scrape_responses
|
||||
.add_and_get_finished(&recv_ref.scrape)
|
||||
{
|
||||
CowResponse::Scrape(Cow::Owned(response))
|
||||
let response = match response {
|
||||
ConnectedResponse::AnnounceIpv4(r) => Response::AnnounceIpv4(r),
|
||||
ConnectedResponse::AnnounceIpv6(r) => Response::AnnounceIpv6(r),
|
||||
ConnectedResponse::Scrape(r) => {
|
||||
if let Some(r) = self.pending_scrape_responses.add_and_get_finished(&r) {
|
||||
Response::Scrape(r)
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match self.send_buffers.prepare_entry(response, recv_ref.addr) {
|
||||
match self.send_buffers.prepare_entry(response, addr) {
|
||||
Ok(entry) => {
|
||||
unsafe { ring.submission().push(&entry).unwrap() };
|
||||
|
||||
num_send_added += 1;
|
||||
}
|
||||
Err(send_buffers::Error::NoBuffers(response)) => {
|
||||
self.local_responses
|
||||
.push_back((response.into_owned(), recv_ref.addr));
|
||||
self.local_responses.push_back((response, addr));
|
||||
|
||||
break;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,14 +6,15 @@ use std::{
|
|||
};
|
||||
|
||||
use aquatic_common::CanonicalSocketAddr;
|
||||
use aquatic_udp_protocol::Response;
|
||||
use io_uring::opcode::SendMsg;
|
||||
|
||||
use crate::{common::CowResponse, config::Config};
|
||||
use crate::config::Config;
|
||||
|
||||
use super::{RESPONSE_BUF_LEN, SOCKET_IDENTIFIER};
|
||||
|
||||
pub enum Error<'a> {
|
||||
NoBuffers(CowResponse<'a>),
|
||||
pub enum Error {
|
||||
NoBuffers(Response),
|
||||
SerializationFailed(std::io::Error),
|
||||
}
|
||||
|
||||
|
|
@ -59,9 +60,9 @@ impl SendBuffers {
|
|||
|
||||
pub fn prepare_entry<'a>(
|
||||
&mut self,
|
||||
response: CowResponse<'a>,
|
||||
response: Response,
|
||||
addr: CanonicalSocketAddr,
|
||||
) -> Result<io_uring::squeue::Entry, Error<'a>> {
|
||||
) -> Result<io_uring::squeue::Entry, Error> {
|
||||
let index = if let Some(index) = self.next_free_index() {
|
||||
index
|
||||
} else {
|
||||
|
|
@ -163,7 +164,7 @@ impl SendBuffer {
|
|||
|
||||
fn prepare_entry(
|
||||
&mut self,
|
||||
response: CowResponse,
|
||||
response: Response,
|
||||
addr: CanonicalSocketAddr,
|
||||
socket_is_ipv4: bool,
|
||||
metadata: &mut SendBufferMetadata,
|
||||
|
|
@ -237,12 +238,12 @@ pub enum ResponseType {
|
|||
}
|
||||
|
||||
impl ResponseType {
|
||||
fn from_response(response: &CowResponse) -> Self {
|
||||
fn from_response(response: &Response) -> Self {
|
||||
match response {
|
||||
CowResponse::Connect(_) => Self::Connect,
|
||||
CowResponse::AnnounceIpv4(_) | CowResponse::AnnounceIpv6(_) => Self::Announce,
|
||||
CowResponse::Scrape(_) => Self::Scrape,
|
||||
CowResponse::Error(_) => Self::Error,
|
||||
Response::Connect(_) => Self::Connect,
|
||||
Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => Self::Announce,
|
||||
Response::Scrape(_) => Self::Scrape,
|
||||
Response::Error(_) => Self::Error,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -47,15 +47,7 @@ pub fn run_swarm_worker(
|
|||
// sends in socket workers (doing both could cause a deadlock)
|
||||
match (request, src.get().ip()) {
|
||||
(ConnectedRequest::Announce(request), IpAddr::V4(ip)) => {
|
||||
// It doesn't matter which socket worker receives announce responses
|
||||
let mut send_ref = response_sender
|
||||
.send_ref_to_any()
|
||||
.expect("swarm response channel is closed");
|
||||
|
||||
send_ref.addr = src;
|
||||
send_ref.kind = ConnectedResponseKind::AnnounceIpv4;
|
||||
|
||||
torrents
|
||||
let response = torrents
|
||||
.ipv4
|
||||
.0
|
||||
.entry(request.info_hash)
|
||||
|
|
@ -67,19 +59,15 @@ pub fn run_swarm_worker(
|
|||
&request,
|
||||
ip.into(),
|
||||
peer_valid_until,
|
||||
&mut send_ref.announce_ipv4,
|
||||
);
|
||||
|
||||
// It doesn't matter which socket worker receives announce responses
|
||||
response_sender
|
||||
.send_to_any(src, ConnectedResponse::AnnounceIpv4(response))
|
||||
.expect("swarm response channel is closed");
|
||||
}
|
||||
(ConnectedRequest::Announce(request), IpAddr::V6(ip)) => {
|
||||
// It doesn't matter which socket worker receives announce responses
|
||||
let mut send_ref = response_sender
|
||||
.send_ref_to_any()
|
||||
.expect("swarm response channel is closed");
|
||||
|
||||
send_ref.addr = src;
|
||||
send_ref.kind = ConnectedResponseKind::AnnounceIpv6;
|
||||
|
||||
torrents
|
||||
let response = torrents
|
||||
.ipv6
|
||||
.0
|
||||
.entry(request.info_hash)
|
||||
|
|
@ -91,28 +79,26 @@ pub fn run_swarm_worker(
|
|||
&request,
|
||||
ip.into(),
|
||||
peer_valid_until,
|
||||
&mut send_ref.announce_ipv6,
|
||||
);
|
||||
|
||||
// It doesn't matter which socket worker receives announce responses
|
||||
response_sender
|
||||
.send_to_any(src, ConnectedResponse::AnnounceIpv6(response))
|
||||
.expect("swarm response channel is closed");
|
||||
}
|
||||
(ConnectedRequest::Scrape(request), IpAddr::V4(_)) => {
|
||||
let mut send_ref = response_sender
|
||||
.send_ref_to(sender_index)
|
||||
let response = torrents.ipv4.scrape(request);
|
||||
|
||||
response_sender
|
||||
.send_to(sender_index, src, ConnectedResponse::Scrape(response))
|
||||
.expect("swarm response channel is closed");
|
||||
|
||||
send_ref.addr = src;
|
||||
send_ref.kind = ConnectedResponseKind::Scrape;
|
||||
|
||||
torrents.ipv4.scrape(request, &mut send_ref.scrape);
|
||||
}
|
||||
(ConnectedRequest::Scrape(request), IpAddr::V6(_)) => {
|
||||
let mut send_ref = response_sender
|
||||
.send_ref_to(sender_index)
|
||||
let response = torrents.ipv6.scrape(request);
|
||||
|
||||
response_sender
|
||||
.send_to(sender_index, src, ConnectedResponse::Scrape(response))
|
||||
.expect("swarm response channel is closed");
|
||||
|
||||
send_ref.addr = src;
|
||||
send_ref.kind = ConnectedResponseKind::Scrape;
|
||||
|
||||
torrents.ipv6.scrape(request, &mut send_ref.scrape);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -79,24 +79,29 @@ impl TorrentMaps {
|
|||
pub struct TorrentMap<I: Ip>(pub IndexMap<InfoHash, TorrentData<I>>);
|
||||
|
||||
impl<I: Ip> TorrentMap<I> {
|
||||
pub fn scrape(&mut self, request: PendingScrapeRequest, response: &mut PendingScrapeResponse) {
|
||||
response.slab_key = request.slab_key;
|
||||
pub fn scrape(&mut self, request: PendingScrapeRequest) -> PendingScrapeResponse {
|
||||
let torrent_stats = request
|
||||
.info_hashes
|
||||
.into_iter()
|
||||
.map(|(i, info_hash)| {
|
||||
let stats = self
|
||||
.0
|
||||
.get(&info_hash)
|
||||
.map(|torrent_data| torrent_data.scrape_statistics())
|
||||
.unwrap_or_else(|| TorrentScrapeStatistics {
|
||||
seeders: NumberOfPeers::new(0),
|
||||
leechers: NumberOfPeers::new(0),
|
||||
completed: NumberOfDownloads::new(0),
|
||||
});
|
||||
|
||||
let torrent_stats = request.info_hashes.into_iter().map(|(i, info_hash)| {
|
||||
let stats = self
|
||||
.0
|
||||
.get(&info_hash)
|
||||
.map(|torrent_data| torrent_data.scrape_statistics())
|
||||
.unwrap_or_else(|| TorrentScrapeStatistics {
|
||||
seeders: NumberOfPeers::new(0),
|
||||
leechers: NumberOfPeers::new(0),
|
||||
completed: NumberOfDownloads::new(0),
|
||||
});
|
||||
(i, stats)
|
||||
})
|
||||
.collect();
|
||||
|
||||
(i, stats)
|
||||
});
|
||||
|
||||
response.torrent_stats.extend(torrent_stats);
|
||||
PendingScrapeResponse {
|
||||
slab_key: request.slab_key,
|
||||
torrent_stats,
|
||||
}
|
||||
}
|
||||
/// Remove forbidden or inactive torrents, reclaim space and return number of remaining peers
|
||||
fn clean_and_get_statistics(
|
||||
|
|
@ -187,8 +192,7 @@ impl<I: Ip> TorrentData<I> {
|
|||
request: &AnnounceRequest,
|
||||
ip_address: I,
|
||||
valid_until: ValidUntil,
|
||||
response: &mut AnnounceResponse<I>,
|
||||
) {
|
||||
) -> AnnounceResponse<I> {
|
||||
let max_num_peers_to_take: usize = if request.peers_wanted.0.get() <= 0 {
|
||||
config.protocol.max_response_peers
|
||||
} else {
|
||||
|
|
@ -209,23 +213,24 @@ impl<I: Ip> TorrentData<I> {
|
|||
// Create the response before inserting the peer. This means that we
|
||||
// don't have to filter it out from the response peers, and that the
|
||||
// reported number of seeders/leechers will not include it
|
||||
let opt_removed_peer = match self {
|
||||
let (response, opt_removed_peer) = match self {
|
||||
Self::Small(peer_map) => {
|
||||
let opt_removed_peer = peer_map.remove(&peer_map_key);
|
||||
|
||||
let (seeders, leechers) = peer_map.num_seeders_leechers();
|
||||
|
||||
response.fixed = AnnounceResponseFixedData {
|
||||
transaction_id: request.transaction_id,
|
||||
announce_interval: AnnounceInterval::new(
|
||||
config.protocol.peer_announce_interval,
|
||||
),
|
||||
leechers: NumberOfPeers::new(leechers.try_into().unwrap_or(i32::MAX)),
|
||||
seeders: NumberOfPeers::new(seeders.try_into().unwrap_or(i32::MAX)),
|
||||
let response = AnnounceResponse {
|
||||
fixed: AnnounceResponseFixedData {
|
||||
transaction_id: request.transaction_id,
|
||||
announce_interval: AnnounceInterval::new(
|
||||
config.protocol.peer_announce_interval,
|
||||
),
|
||||
leechers: NumberOfPeers::new(leechers.try_into().unwrap_or(i32::MAX)),
|
||||
seeders: NumberOfPeers::new(seeders.try_into().unwrap_or(i32::MAX)),
|
||||
},
|
||||
peers: peer_map.extract_response_peers(max_num_peers_to_take),
|
||||
};
|
||||
|
||||
peer_map.extract_response_peers(max_num_peers_to_take, &mut response.peers);
|
||||
|
||||
// Convert peer map to large variant if it is full and
|
||||
// announcing peer is not stopped and will therefore be
|
||||
// inserted
|
||||
|
|
@ -233,24 +238,25 @@ impl<I: Ip> TorrentData<I> {
|
|||
*self = Self::Large(peer_map.to_large());
|
||||
}
|
||||
|
||||
opt_removed_peer
|
||||
(response, opt_removed_peer)
|
||||
}
|
||||
Self::Large(peer_map) => {
|
||||
let opt_removed_peer = peer_map.remove_peer(&peer_map_key);
|
||||
|
||||
let (seeders, leechers) = peer_map.num_seeders_leechers();
|
||||
|
||||
response.fixed = AnnounceResponseFixedData {
|
||||
transaction_id: request.transaction_id,
|
||||
announce_interval: AnnounceInterval::new(
|
||||
config.protocol.peer_announce_interval,
|
||||
),
|
||||
leechers: NumberOfPeers::new(leechers.try_into().unwrap_or(i32::MAX)),
|
||||
seeders: NumberOfPeers::new(seeders.try_into().unwrap_or(i32::MAX)),
|
||||
let response = AnnounceResponse {
|
||||
fixed: AnnounceResponseFixedData {
|
||||
transaction_id: request.transaction_id,
|
||||
announce_interval: AnnounceInterval::new(
|
||||
config.protocol.peer_announce_interval,
|
||||
),
|
||||
leechers: NumberOfPeers::new(leechers.try_into().unwrap_or(i32::MAX)),
|
||||
seeders: NumberOfPeers::new(seeders.try_into().unwrap_or(i32::MAX)),
|
||||
},
|
||||
peers: peer_map.extract_response_peers(rng, max_num_peers_to_take),
|
||||
};
|
||||
|
||||
peer_map.extract_response_peers(rng, max_num_peers_to_take, &mut response.peers);
|
||||
|
||||
// Try shrinking the map if announcing peer is stopped and
|
||||
// will therefore not be inserted
|
||||
if status == PeerStatus::Stopped {
|
||||
|
|
@ -259,7 +265,7 @@ impl<I: Ip> TorrentData<I> {
|
|||
}
|
||||
}
|
||||
|
||||
opt_removed_peer
|
||||
(response, opt_removed_peer)
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -290,6 +296,8 @@ impl<I: Ip> TorrentData<I> {
|
|||
}
|
||||
}
|
||||
};
|
||||
|
||||
response
|
||||
}
|
||||
|
||||
pub fn scrape_statistics(&self) -> TorrentScrapeStatistics {
|
||||
|
|
@ -313,7 +321,7 @@ impl<I: Ip> Default for TorrentData<I> {
|
|||
}
|
||||
|
||||
/// Store torrents with up to two peers without an extra heap allocation
|
||||
///
|
||||
///
|
||||
/// On public open trackers, this is likely to be the majority of torrents.
|
||||
#[derive(Default, Debug)]
|
||||
pub struct SmallPeerMap<I: Ip>(ArrayVec<(ResponsePeer<I>, Peer), SMALL_PEER_MAP_CAPACITY>);
|
||||
|
|
@ -344,12 +352,8 @@ impl<I: Ip> SmallPeerMap<I> {
|
|||
None
|
||||
}
|
||||
|
||||
fn extract_response_peers(
|
||||
&self,
|
||||
max_num_peers_to_take: usize,
|
||||
peers: &mut Vec<ResponsePeer<I>>,
|
||||
) {
|
||||
peers.extend(self.0.iter().take(max_num_peers_to_take).map(|(k, _)| k))
|
||||
fn extract_response_peers(&self, max_num_peers_to_take: usize) -> Vec<ResponsePeer<I>> {
|
||||
Vec::from_iter(self.0.iter().take(max_num_peers_to_take).map(|(k, _)| *k))
|
||||
}
|
||||
|
||||
fn clean_and_get_num_peers(
|
||||
|
|
@ -427,10 +431,9 @@ impl<I: Ip> LargePeerMap<I> {
|
|||
&self,
|
||||
rng: &mut impl Rng,
|
||||
max_num_peers_to_take: usize,
|
||||
peers: &mut Vec<ResponsePeer<I>>,
|
||||
) {
|
||||
) -> Vec<ResponsePeer<I>> {
|
||||
if self.peers.len() <= max_num_peers_to_take {
|
||||
peers.extend(self.peers.keys());
|
||||
self.peers.keys().copied().collect()
|
||||
} else {
|
||||
let middle_index = self.peers.len() / 2;
|
||||
let num_to_take_per_half = max_num_peers_to_take / 2;
|
||||
|
|
@ -451,12 +454,16 @@ impl<I: Ip> LargePeerMap<I> {
|
|||
let end_half_one = offset_half_one + num_to_take_per_half;
|
||||
let end_half_two = offset_half_two + num_to_take_per_half;
|
||||
|
||||
let mut peers = Vec::with_capacity(max_num_peers_to_take);
|
||||
|
||||
if let Some(slice) = self.peers.get_range(offset_half_one..end_half_one) {
|
||||
peers.extend(slice.keys());
|
||||
}
|
||||
if let Some(slice) = self.peers.get_range(offset_half_two..end_half_two) {
|
||||
peers.extend(slice.keys());
|
||||
}
|
||||
|
||||
peers
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue