aquatic_common: extract response peers: don't return sender

Seems to fix, or at least help with, some issues where
real clients were slow to initiate transfers.
This commit is contained in:
Joakim Frostegård 2020-08-16 23:07:39 +02:00
parent b5452c2954
commit 6ee8ed4895
6 changed files with 89 additions and 70 deletions

View file

@ -27,12 +27,13 @@ impl ValidUntil {
/// half-random selection of peers from first and second halves of map,
/// in order to avoid returning too homogeneous peers.
///
/// Don't care if we send back announcing peer.
/// Might return one less peer than wanted since sender is filtered out.
#[inline]
pub fn extract_response_peers<K, V, R, F>(
rng: &mut impl Rng,
peer_map: &IndexMap<K, V>,
max_num_peers_to_take: usize,
sender_peer_map_key: K,
peer_conversion_function: F
) -> Vec<R>
where
@ -41,9 +42,15 @@ pub fn extract_response_peers<K, V, R, F>(
{
let peer_map_len = peer_map.len();
if peer_map_len <= max_num_peers_to_take {
peer_map.values()
.map(peer_conversion_function)
if peer_map_len <= max_num_peers_to_take + 1 {
peer_map.iter()
.filter_map(|(k, v)|{
if *k == sender_peer_map_key {
None
} else {
Some(peer_conversion_function(v))
}
})
.collect()
} else {
let half_num_to_take = max_num_peers_to_take / 2;
@ -64,17 +71,19 @@ pub fn extract_response_peers<K, V, R, F>(
let mut peers: Vec<R> = Vec::with_capacity(max_num_peers_to_take);
for i in offset_first_half..end_first_half {
if let Some((_, peer)) = peer_map.get_index(i){
if let Some((k, peer)) = peer_map.get_index(i){
if *k != sender_peer_map_key {
peers.push(peer_conversion_function(peer))
}
}
}
for i in offset_second_half..end_second_half {
if let Some((_, peer)) = peer_map.get_index(i){
if let Some((k, peer)) = peer_map.get_index(i){
if *k != sender_peer_map_key {
peers.push(peer_conversion_function(peer))
}
}
debug_assert_eq!(peers.len(), max_num_peers_to_take);
}
peers
}

View file

@ -21,7 +21,7 @@ pub const LISTENER_TOKEN: Token = Token(0);
pub const CHANNEL_TOKEN: Token = Token(1);
pub trait Ip: Copy + Eq + ::std::hash::Hash {}
pub trait Ip: ::std::fmt::Debug + Copy + Eq + ::std::hash::Hash {}
impl Ip for Ipv4Addr {}
impl Ip for Ipv6Addr {}
@ -73,7 +73,7 @@ impl PeerStatus {
}
#[derive(Clone, Copy)]
#[derive(Debug, Clone, Copy)]
pub struct Peer<I: Ip> {
pub connection_meta: PeerConnectionMeta<I>,
pub port: u16,

View file

@ -194,7 +194,7 @@ fn upsert_peer_and_get_response_peers<I: Ip>(
valid_until: ValidUntil,
) -> (usize, usize, Vec<ResponsePeer<I>>) {
// Insert/update/remove peer who sent this request
{
let peer_status = PeerStatus::from_event_and_bytes_left(
request.event,
Some(request.bytes_left)
@ -207,6 +207,8 @@ fn upsert_peer_and_get_response_peers<I: Ip>(
valid_until,
};
::log::debug!("peer: {:?}", peer);
let ip_or_key = request.key
.map(Either::Right)
.unwrap_or_else(||
@ -218,22 +220,26 @@ fn upsert_peer_and_get_response_peers<I: Ip>(
ip_or_key,
};
::log::debug!("peer map key: {:?}", peer_map_key);
let opt_removed_peer = match peer_status {
PeerStatus::Leeching => {
torrent_data.num_leechers += 1;
torrent_data.peers.insert(peer_map_key, peer)
torrent_data.peers.insert(peer_map_key.clone(), peer)
},
PeerStatus::Seeding => {
torrent_data.num_seeders += 1;
torrent_data.peers.insert(peer_map_key, peer)
torrent_data.peers.insert(peer_map_key.clone(), peer)
},
PeerStatus::Stopped => {
torrent_data.peers.remove(&peer_map_key)
}
};
::log::debug!("opt_removed_peer: {:?}", opt_removed_peer);
match opt_removed_peer.map(|peer| peer.status){
Some(PeerStatus::Leeching) => {
torrent_data.num_leechers -= 1;
@ -243,7 +249,8 @@ fn upsert_peer_and_get_response_peers<I: Ip>(
},
_ => {}
}
}
::log::debug!("peer request numwant: {:?}", request.numwant);
let max_num_peers_to_take = match request.numwant {
Some(0) | None => config.protocol.max_peers,
@ -254,6 +261,7 @@ fn upsert_peer_and_get_response_peers<I: Ip>(
rng,
&torrent_data.peers,
max_num_peers_to_take,
peer_map_key,
Peer::to_response_peer
);

View file

@ -90,7 +90,7 @@ impl <I: Ip>Peer<I> {
}
#[derive(PartialEq, Eq, Hash, Clone)]
#[derive(PartialEq, Eq, Hash, Clone, Copy)]
pub struct PeerMapKey<I: Ip> {
pub ip: I,
pub peer_id: PeerId

View file

@ -291,6 +291,7 @@ fn handle_announce_request<I: Ip>(
rng,
&torrent_data.peers,
max_num_peers_to_take,
peer_key,
Peer::to_response_peer
);
@ -419,9 +420,17 @@ mod tests {
let mut peer_map: PeerMap<Ipv4Addr> = IndexMap::new();
let mut opt_sender_key = None;
let mut opt_sender_peer = None;
for i in 0..gen_num_peers {
let (key, value) = gen_peer_map_key_and_value(i);
if i == 0 {
opt_sender_key = Some(key);
opt_sender_peer = Some(value.to_response_peer());
}
peer_map.insert(key, value);
}
@ -431,6 +440,7 @@ mod tests {
&mut rng,
&peer_map,
req_num_peers,
opt_sender_key.unwrap_or_else(|| gen_peer_map_key_and_value(1).0),
Peer::to_response_peer
);
@ -439,15 +449,17 @@ mod tests {
let mut success = peers.len() <= req_num_peers;
if req_num_peers >= gen_num_peers as usize {
success &= peers.len() == gen_num_peers as usize;
success &= peers.len() == gen_num_peers as usize ||
peers.len() + 1 == gen_num_peers as usize;
}
// Check that returned peers are unique (no overlap)
// Check that returned peers are unique (no overlap) and that sender
// isn't returned
let mut ip_addresses = HashSet::new();
for peer in peers {
if ip_addresses.contains(&peer.ip_address){
if peer == opt_sender_peer.clone().unwrap() || ip_addresses.contains(&peer.ip_address){
success = false;
break;

View file

@ -178,23 +178,13 @@ pub fn handle_announce_requests(
rng,
&torrent_data.peers,
max_num_peers_to_take,
request.peer_id,
f
);
for (offer, offer_receiver) in offers.into_iter()
.zip(offer_receivers)
{
// Avoid sending offer back to requesting peer. This could be
// done in extract_announce_peers, but it would likely hurt
// performance to check all peers there for their socket addr,
// especially if there are thousands of peers. It might be
// possible to write a new version of that function which isn't
// shared with aquatic_udp and goes about it differently
// though.
if request_sender_meta.naive_peer_addr == offer_receiver.connection_meta.naive_peer_addr {
continue;
}
let middleman_offer = MiddlemanOfferToPeer {
action: AnnounceAction,
info_hash: request.info_hash,