Reduce ValidUntil size; reduce size of various ws structs

This commit is contained in:
Joakim Frostegård 2022-08-01 14:15:06 +02:00
parent 97fa699476
commit fcf18c845f
21 changed files with 343 additions and 193 deletions

View file

@ -29,16 +29,16 @@ pub struct State {
}
#[derive(Copy, Clone, Debug)]
pub struct PendingScrapeId(pub usize);
pub struct PendingScrapeId(pub u8);
#[derive(Copy, Clone, Debug)]
pub struct ConsumerId(pub usize);
pub struct ConsumerId(pub u8);
/// Identifier of a connection within a socket worker: the slab key under
/// which the `ConnectionReference` is stored.
///
/// Derives `Eq` alongside `PartialEq` (clippy `derive_partial_eq_without_eq`):
/// equality on the inner `usize` is total, and `Eq` is required if the id is
/// ever used as a hash-map key.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ConnectionId(pub usize);
#[derive(Clone, Copy, Debug)]
pub struct ConnectionMeta {
pub struct InMessageMeta {
/// Index of socket worker responsible for this connection. Required for
/// sending back response through correct channel to correct worker.
pub out_message_consumer_id: ConsumerId,
@ -47,6 +47,25 @@ pub struct ConnectionMeta {
pub pending_scrape_id: Option<PendingScrapeId>,
}
/// Routing metadata attached to each `OutMessage` sent from a swarm worker
/// back to a socket worker.
#[derive(Clone, Copy, Debug)]
pub struct OutMessageMeta {
/// Index of socket worker responsible for this connection. Required for
/// sending back response through correct channel to correct worker.
pub out_message_consumer_id: ConsumerId,
/// Key of the connection within the responsible socket worker's
/// connection slab.
pub connection_id: ConnectionId,
/// Set when this message is part of a scrape response assembled from
/// multiple swarm-worker responses; `None` for all other messages.
pub pending_scrape_id: Option<PendingScrapeId>,
}
/// Conversion from incoming-message metadata to outgoing-message metadata,
/// carrying over the routing fields (consumer id, connection id, pending
/// scrape id).
///
/// Implemented as `From` rather than a hand-written `Into` (clippy
/// `from_over_into`): the standard library's blanket impl then provides
/// `Into<OutMessageMeta> for InMessageMeta` automatically, so every
/// existing `.into()` call site keeps working unchanged.
impl From<InMessageMeta> for OutMessageMeta {
    fn from(meta: InMessageMeta) -> Self {
        OutMessageMeta {
            out_message_consumer_id: meta.out_message_consumer_id,
            connection_id: meta.connection_id,
            pending_scrape_id: meta.pending_scrape_id,
        }
    }
}
#[derive(Clone, Copy, Debug)]
pub enum SwarmControlMessage {
ConnectionClosed {

View file

@ -124,11 +124,11 @@ pub struct CleaningConfig {
/// Clean peers this often (seconds)
pub torrent_cleaning_interval: u64,
/// Remove peers that have not announced for this long (seconds)
pub max_peer_age: u64,
pub max_peer_age: u32,
// Clean connections this often (seconds)
pub connection_cleaning_interval: u64,
/// Close connections if no responses have been sent to them for this long (seconds)
pub max_connection_idle: u64,
pub max_connection_idle: u32,
}
impl Default for CleaningConfig {

View file

@ -8,7 +8,7 @@ use anyhow::Context;
use aquatic_common::cpu_pinning::glommio::{get_worker_placement, set_affinity_for_util_worker};
use aquatic_common::cpu_pinning::WorkerIndex;
use aquatic_common::rustls_config::create_rustls_config;
use aquatic_common::PanicSentinelWatcher;
use aquatic_common::{PanicSentinelWatcher, ServerStartInstant};
use glommio::{channels::channel_mesh::MeshBuilder, prelude::*};
use signal_hook::{
consts::{SIGTERM, SIGUSR1},
@ -49,14 +49,19 @@ pub fn run(config: Config) -> ::anyhow::Result<()> {
let priv_dropper = PrivilegeDropper::new(config.privileges.clone(), config.socket_workers);
let opt_tls_config = if config.network.enable_tls {
Some(Arc::new(create_rustls_config(
&config.network.tls_certificate_path,
&config.network.tls_private_key_path,
).with_context(|| "create rustls config")?))
Some(Arc::new(
create_rustls_config(
&config.network.tls_certificate_path,
&config.network.tls_private_key_path,
)
.with_context(|| "create rustls config")?,
))
} else {
None
};
let server_start_instant = ServerStartInstant::new();
let mut executors = Vec::new();
for i in 0..(config.socket_workers) {
@ -88,6 +93,7 @@ pub fn run(config: Config) -> ::anyhow::Result<()> {
request_mesh_builder,
response_mesh_builder,
priv_dropper,
server_start_instant,
)
.await
})
@ -121,6 +127,7 @@ pub fn run(config: Config) -> ::anyhow::Result<()> {
control_mesh_builder,
request_mesh_builder,
response_mesh_builder,
server_start_instant,
)
.await
})

View file

@ -4,13 +4,13 @@ use std::collections::BTreeMap;
use std::os::unix::prelude::{FromRawFd, IntoRawFd};
use std::rc::Rc;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::time::Duration;
use anyhow::Context;
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
use aquatic_common::privileges::PrivilegeDropper;
use aquatic_common::rustls_config::RustlsConfig;
use aquatic_common::PanicSentinel;
use aquatic_common::{PanicSentinel, ServerStartInstant};
use aquatic_ws_protocol::*;
use async_tungstenite::WebSocketStream;
use futures::stream::{SplitSink, SplitStream};
@ -42,7 +42,7 @@ struct ConnectionReference {
task_handle: Option<JoinHandle<()>>,
/// Sender part of channel used to pass on outgoing messages from request
/// worker
out_message_sender: Rc<LocalSender<(ConnectionMeta, OutMessage)>>,
out_message_sender: Rc<LocalSender<(OutMessageMeta, OutMessage)>>,
/// Updated after sending message to peer
valid_until: ValidUntil,
peer_id: Option<PeerId>,
@ -56,9 +56,10 @@ pub async fn run_socket_worker(
state: State,
opt_tls_config: Option<Arc<RustlsConfig>>,
control_message_mesh_builder: MeshBuilder<SwarmControlMessage, Partial>,
in_message_mesh_builder: MeshBuilder<(ConnectionMeta, InMessage), Partial>,
out_message_mesh_builder: MeshBuilder<(ConnectionMeta, OutMessage), Partial>,
in_message_mesh_builder: MeshBuilder<(InMessageMeta, InMessage), Partial>,
out_message_mesh_builder: MeshBuilder<(OutMessageMeta, OutMessage), Partial>,
priv_dropper: PrivilegeDropper,
server_start_instant: ServerStartInstant,
) {
let config = Rc::new(config);
let access_list = state.access_list;
@ -84,7 +85,13 @@ pub async fn run_socket_worker(
let (_, mut out_message_receivers) =
out_message_mesh_builder.join(Role::Consumer).await.unwrap();
let out_message_consumer_id = ConsumerId(out_message_receivers.consumer_id().unwrap());
let out_message_consumer_id = ConsumerId(
out_message_receivers
.consumer_id()
.unwrap()
.try_into()
.unwrap(),
);
let connection_slab = Rc::new(RefCell::new(Slab::new()));
@ -94,6 +101,7 @@ pub async fn run_socket_worker(
clean_connections(
config.clone(),
connection_slab.clone(),
server_start_instant,
)
}),
tq_prioritized,
@ -129,13 +137,16 @@ pub async fn run_socket_worker(
let key = RefCell::borrow_mut(&connection_slab).insert(ConnectionReference {
task_handle: None,
out_message_sender: out_message_sender.clone(),
valid_until: ValidUntil::new(config.cleaning.max_connection_idle),
valid_until: ValidUntil::new(
server_start_instant,
config.cleaning.max_connection_idle,
),
peer_id: None,
announced_info_hashes: Default::default(),
ip_version,
});
::log::info!("accepting stream, assigning id {}", key);
::log::trace!("accepting stream, assigning id {}", key);
let task_handle = spawn_local_into(enclose!((config, access_list, control_message_senders, in_message_senders, connection_slab, opt_tls_config) async move {
if let Err(err) = run_connection(
@ -147,6 +158,7 @@ pub async fn run_socket_worker(
connection_slab.clone(),
out_message_sender,
out_message_receiver,
server_start_instant,
out_message_consumer_id,
ConnectionId(key),
opt_tls_config,
@ -204,11 +216,12 @@ pub async fn run_socket_worker(
async fn clean_connections(
config: Rc<Config>,
connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
server_start_instant: ServerStartInstant,
) -> Option<Duration> {
let now = Instant::now();
let now = server_start_instant.seconds_elapsed();
connection_slab.borrow_mut().retain(|_, reference| {
if reference.valid_until.0 > now {
if reference.valid_until.valid(now) {
true
} else {
if let Some(ref handle) = reference.task_handle {
@ -227,14 +240,14 @@ async fn clean_connections(
}
async fn receive_out_messages(
mut out_message_receiver: ConnectedReceiver<(ConnectionMeta, OutMessage)>,
mut out_message_receiver: ConnectedReceiver<(OutMessageMeta, OutMessage)>,
connection_references: Rc<RefCell<Slab<ConnectionReference>>>,
) {
let connection_references = &connection_references;
while let Some((meta, out_message)) = out_message_receiver.next().await {
if let Some(reference) = connection_references.borrow().get(meta.connection_id.0) {
::log::info!(
::log::trace!(
"local channel {} len: {}",
meta.connection_id.0,
reference.out_message_sender.len()
@ -245,7 +258,7 @@ async fn receive_out_messages(
Err(GlommioError::Closed(_)) => {}
Err(GlommioError::WouldBlock(_)) => {}
Err(err) => {
::log::info!(
::log::debug!(
"Couldn't send out_message from shared channel to local receiver: {:?}",
err
);
@ -258,12 +271,13 @@ async fn receive_out_messages(
async fn run_connection(
config: Rc<Config>,
access_list: Arc<AccessListArcSwap>,
in_message_senders: Rc<Senders<(ConnectionMeta, InMessage)>>,
in_message_senders: Rc<Senders<(InMessageMeta, InMessage)>>,
tq_prioritized: TaskQueueHandle,
tq_regular: TaskQueueHandle,
connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
out_message_sender: Rc<LocalSender<(ConnectionMeta, OutMessage)>>,
out_message_receiver: LocalReceiver<(ConnectionMeta, OutMessage)>,
out_message_sender: Rc<LocalSender<(OutMessageMeta, OutMessage)>>,
out_message_receiver: LocalReceiver<(OutMessageMeta, OutMessage)>,
server_start_instant: ServerStartInstant,
out_message_consumer_id: ConsumerId,
connection_id: ConnectionId,
opt_tls_config: Option<Arc<RustlsConfig>>,
@ -284,6 +298,7 @@ async fn run_connection(
connection_slab.clone(),
out_message_sender,
out_message_receiver,
server_start_instant,
out_message_consumer_id,
connection_id,
stream,
@ -329,6 +344,7 @@ async fn run_connection(
connection_slab.clone(),
out_message_sender,
out_message_receiver,
server_start_instant,
out_message_consumer_id,
connection_id,
stream,
@ -343,12 +359,13 @@ async fn run_stream_agnostic_connection<
>(
config: Rc<Config>,
access_list: Arc<AccessListArcSwap>,
in_message_senders: Rc<Senders<(ConnectionMeta, InMessage)>>,
in_message_senders: Rc<Senders<(InMessageMeta, InMessage)>>,
tq_prioritized: TaskQueueHandle,
tq_regular: TaskQueueHandle,
connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
out_message_sender: Rc<LocalSender<(ConnectionMeta, OutMessage)>>,
out_message_receiver: LocalReceiver<(ConnectionMeta, OutMessage)>,
out_message_sender: Rc<LocalSender<(OutMessageMeta, OutMessage)>>,
out_message_receiver: LocalReceiver<(OutMessageMeta, OutMessage)>,
server_start_instant: ServerStartInstant,
out_message_consumer_id: ConsumerId,
connection_id: ConnectionId,
stream: S,
@ -400,6 +417,7 @@ async fn run_stream_agnostic_connection<
ws_out,
pending_scrape_slab,
connection_id,
server_start_instant,
};
let result = writer.run_out_message_loop().await;
@ -418,8 +436,8 @@ struct ConnectionReader<S> {
config: Rc<Config>,
access_list_cache: AccessListCache,
connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
in_message_senders: Rc<Senders<(ConnectionMeta, InMessage)>>,
out_message_sender: Rc<LocalSender<(ConnectionMeta, OutMessage)>>,
in_message_senders: Rc<Senders<(InMessageMeta, InMessage)>>,
out_message_sender: Rc<LocalSender<(OutMessageMeta, OutMessage)>>,
pending_scrape_slab: Rc<RefCell<Slab<PendingScrapeResponse>>>,
out_message_consumer_id: ConsumerId,
ws_in: SplitStream<WebSocketStream<S>>,
@ -430,8 +448,6 @@ struct ConnectionReader<S> {
impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
async fn run_in_message_loop(&mut self) -> anyhow::Result<()> {
loop {
::log::debug!("read_in_message");
while self.out_message_sender.is_full() {
sleep(Duration::from_millis(100)).await;
@ -442,8 +458,6 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
match InMessage::from_ws_message(message) {
Ok(in_message) => {
::log::debug!("parsed in_message");
self.handle_in_message(in_message).await?;
}
Err(err) => {
@ -516,7 +530,6 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
)
.await
.unwrap();
::log::info!("sent message to swarm worker");
} else {
self.send_error_response(
"Info hash not allowed".into(),
@ -559,11 +572,14 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
stats: Default::default(),
};
let pending_scrape_id = PendingScrapeId(
RefCell::borrow_mut(&mut self.pending_scrape_slab)
.insert(pending_scrape_response),
);
let meta = self.make_connection_meta(Some(pending_scrape_id));
let pending_scrape_id: u8 = self
.pending_scrape_slab
.borrow_mut()
.insert(pending_scrape_response)
.try_into()
.with_context(|| "Reached 256 pending scrape responses")?;
let meta = self.make_connection_meta(Some(PendingScrapeId(pending_scrape_id)));
for (consumer_index, info_hashes) in info_hashes_by_worker {
let in_message = InMessage::ScrapeRequest(ScrapeRequest {
@ -576,7 +592,6 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
.send_to(consumer_index, (meta, in_message))
.await
.unwrap();
::log::info!("sent message to swarm worker");
}
}
}
@ -597,13 +612,13 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
});
self.out_message_sender
.send((self.make_connection_meta(None), out_message))
.send((self.make_connection_meta(None).into(), out_message))
.await
.map_err(|err| anyhow::anyhow!("ConnectionReader::send_error_response failed: {}", err))
}
fn make_connection_meta(&self, pending_scrape_id: Option<PendingScrapeId>) -> ConnectionMeta {
ConnectionMeta {
fn make_connection_meta(&self, pending_scrape_id: Option<PendingScrapeId>) -> InMessageMeta {
InMessageMeta {
connection_id: self.connection_id,
out_message_consumer_id: self.out_message_consumer_id,
ip_version: self.ip_version,
@ -614,10 +629,11 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
struct ConnectionWriter<S> {
config: Rc<Config>,
out_message_receiver: LocalReceiver<(ConnectionMeta, OutMessage)>,
out_message_receiver: LocalReceiver<(OutMessageMeta, OutMessage)>,
connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
ws_out: SplitSink<WebSocketStream<S>, tungstenite::Message>,
pending_scrape_slab: Rc<RefCell<Slab<PendingScrapeResponse>>>,
server_start_instant: ServerStartInstant,
connection_id: ConnectionId,
}
@ -636,7 +652,7 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionWriter<S> {
let finished = if let Some(pending) = Slab::get_mut(
&mut RefCell::borrow_mut(&self.pending_scrape_slab),
pending_scrape_id.0,
pending_scrape_id.0 as usize,
) {
pending.stats.extend(out_message.files);
pending.pending_worker_out_messages -= 1;
@ -650,7 +666,7 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionWriter<S> {
let out_message = {
let mut slab = RefCell::borrow_mut(&self.pending_scrape_slab);
let pending = slab.remove(pending_scrape_id.0);
let pending = slab.remove(pending_scrape_id.0 as usize);
slab.shrink_to_fit();
@ -690,13 +706,16 @@ impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionWriter<S> {
self.connection_id.0
)
})?
.valid_until = ValidUntil::new(self.config.cleaning.max_connection_idle);
.valid_until = ValidUntil::new(
self.server_start_instant,
self.config.cleaning.max_connection_idle,
);
Ok(())
}
Ok(Err(err)) => Err(err.into()),
Err(err) => {
::log::info!("send_out_message: sending to peer took to long: {}", err);
::log::debug!("send_out_message: sending to peer took to long: {}", err);
Ok(())
}

View file

@ -1,7 +1,7 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::time::Duration;
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
use futures::StreamExt;
@ -12,7 +12,10 @@ use glommio::timer::TimerActionRepeat;
use hashbrown::HashMap;
use rand::{rngs::SmallRng, SeedableRng};
use aquatic_common::{extract_response_peers, AmortizedIndexMap, PanicSentinel};
use aquatic_common::{
extract_response_peers, AmortizedIndexMap, PanicSentinel, SecondsSinceServerStart,
ServerStartInstant,
};
use aquatic_ws_protocol::*;
use crate::common::*;
@ -44,8 +47,9 @@ impl PeerStatus {
#[derive(Clone, Copy)]
struct Peer {
pub connection_meta: ConnectionMeta,
pub status: PeerStatus,
pub consumer_id: ConsumerId,
pub connection_id: ConnectionId,
pub seeder: bool,
pub valid_until: ValidUntil,
}
@ -71,14 +75,10 @@ impl Default for TorrentData {
impl TorrentData {
pub fn remove_peer(&mut self, peer_id: PeerId) {
if let Some(peer) = self.peers.remove(&peer_id) {
match peer.status {
PeerStatus::Leeching => {
self.num_leechers -= 1;
}
PeerStatus::Seeding => {
self.num_seeders -= 1;
}
PeerStatus::Stopped => (),
if peer.seeder {
self.num_seeders -= 1;
} else {
self.num_leechers -= 1;
}
}
}
@ -93,20 +93,25 @@ struct TorrentMaps {
}
impl TorrentMaps {
fn clean(&mut self, config: &Config, access_list: &Arc<AccessListArcSwap>) {
fn clean(
&mut self,
config: &Config,
access_list: &Arc<AccessListArcSwap>,
server_start_instant: ServerStartInstant,
) {
let mut access_list_cache = create_access_list_cache(access_list);
let now = server_start_instant.seconds_elapsed();
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv4);
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv6);
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv4, now);
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv6, now);
}
fn clean_torrent_map(
config: &Config,
access_list_cache: &mut AccessListCache,
torrent_map: &mut TorrentMap,
now: SecondsSinceServerStart,
) {
let now = Instant::now();
torrent_map.retain(|info_hash, torrent_data| {
if !access_list_cache
.load()
@ -119,18 +124,14 @@ impl TorrentMaps {
let num_leechers = &mut torrent_data.num_leechers;
torrent_data.peers.retain(|_, peer| {
let keep = peer.valid_until.0 >= now;
let keep = peer.valid_until.valid(now);
if !keep {
match peer.status {
PeerStatus::Seeding => {
*num_seeders -= 1;
}
PeerStatus::Leeching => {
*num_leechers -= 1;
}
_ => (),
};
if peer.seeder {
*num_seeders -= 1;
} else {
*num_leechers -= 1;
}
}
keep
@ -148,8 +149,9 @@ pub async fn run_swarm_worker(
config: Config,
state: State,
control_message_mesh_builder: MeshBuilder<SwarmControlMessage, Partial>,
in_message_mesh_builder: MeshBuilder<(ConnectionMeta, InMessage), Partial>,
out_message_mesh_builder: MeshBuilder<(ConnectionMeta, OutMessage), Partial>,
in_message_mesh_builder: MeshBuilder<(InMessageMeta, InMessage), Partial>,
out_message_mesh_builder: MeshBuilder<(OutMessageMeta, OutMessage), Partial>,
server_start_instant: ServerStartInstant,
) {
let (_, mut control_message_receivers) = control_message_mesh_builder
.join(Role::Consumer)
@ -167,7 +169,7 @@ pub async fn run_swarm_worker(
// Periodically clean torrents
TimerActionRepeat::repeat(enclose!((config, torrents, access_list) move || {
enclose!((config, torrents, access_list) move || async move {
torrents.borrow_mut().clean(&config, &access_list);
torrents.borrow_mut().clean(&config, &access_list, server_start_instant);
Some(Duration::from_secs(config.cleaning.torrent_cleaning_interval))
})()
@ -186,6 +188,7 @@ pub async fn run_swarm_worker(
let handle = spawn_local(handle_request_stream(
config.clone(),
torrents.clone(),
server_start_instant,
out_message_senders.clone(),
receiver,
))
@ -229,19 +232,23 @@ where
async fn handle_request_stream<S>(
config: Config,
torrents: Rc<RefCell<TorrentMaps>>,
out_message_senders: Rc<Senders<(ConnectionMeta, OutMessage)>>,
server_start_instant: ServerStartInstant,
out_message_senders: Rc<Senders<(OutMessageMeta, OutMessage)>>,
stream: S,
) where
S: futures_lite::Stream<Item = (ConnectionMeta, InMessage)> + ::std::marker::Unpin,
S: futures_lite::Stream<Item = (InMessageMeta, InMessage)> + ::std::marker::Unpin,
{
let rng = Rc::new(RefCell::new(SmallRng::from_entropy()));
let max_peer_age = config.cleaning.max_peer_age;
let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(max_peer_age)));
let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(
server_start_instant,
max_peer_age,
)));
TimerActionRepeat::repeat(enclose!((peer_valid_until) move || {
enclose!((peer_valid_until) move || async move {
*peer_valid_until.borrow_mut() = ValidUntil::new(max_peer_age);
*peer_valid_until.borrow_mut() = ValidUntil::new(server_start_instant, max_peer_age);
Some(Duration::from_secs(1))
})()
@ -279,14 +286,12 @@ async fn handle_request_stream<S>(
};
for (meta, out_message) in out_messages.drain(..) {
::log::info!("swarm worker trying to send OutMessage to socket worker");
out_message_senders
.send_to(meta.out_message_consumer_id.0, (meta, out_message))
.send_to(meta.out_message_consumer_id.0 as usize, (meta, out_message))
.await
.expect("failed sending out_message to socket worker");
::log::info!("swarm worker sent OutMessage to socket worker");
::log::debug!("swarm worker sent OutMessage to socket worker");
}
},
)
@ -297,9 +302,9 @@ fn handle_announce_request(
config: &Config,
rng: &mut SmallRng,
torrent_maps: &mut TorrentMaps,
out_messages: &mut Vec<(ConnectionMeta, OutMessage)>,
out_messages: &mut Vec<(OutMessageMeta, OutMessage)>,
valid_until: ValidUntil,
request_sender_meta: ConnectionMeta,
request_sender_meta: InMessageMeta,
request: AnnounceRequest,
) {
let torrent_data: &mut TorrentData = if let IpVersion::V4 = request_sender_meta.ip_version {
@ -313,7 +318,7 @@ fn handle_announce_request(
// peers have access to each others peer_id's, they could send requests
// using them, causing all sorts of issues.
if let Some(previous_peer) = torrent_data.peers.get(&request.peer_id) {
if request_sender_meta.connection_id != previous_peer.connection_meta.connection_id {
if request_sender_meta.connection_id != previous_peer.connection_id {
return;
}
}
@ -327,31 +332,39 @@ fn handle_announce_request(
request.bytes_left,
);
let peer = Peer {
connection_meta: request_sender_meta,
status: peer_status,
valid_until,
};
let opt_removed_peer = match peer_status {
PeerStatus::Leeching => {
torrent_data.num_leechers += 1;
let peer = Peer {
connection_id: request_sender_meta.connection_id,
consumer_id: request_sender_meta.out_message_consumer_id,
seeder: false,
valid_until,
};
torrent_data.peers.insert(request.peer_id, peer)
}
PeerStatus::Seeding => {
torrent_data.num_seeders += 1;
let peer = Peer {
connection_id: request_sender_meta.connection_id,
consumer_id: request_sender_meta.out_message_consumer_id,
seeder: true,
valid_until,
};
torrent_data.peers.insert(request.peer_id, peer)
}
PeerStatus::Stopped => torrent_data.peers.remove(&request.peer_id),
};
match opt_removed_peer.map(|peer| peer.status) {
Some(PeerStatus::Leeching) => {
match opt_removed_peer.map(|peer| peer.seeder) {
Some(false) => {
torrent_data.num_leechers -= 1;
}
Some(PeerStatus::Seeding) => {
Some(true) => {
torrent_data.num_seeders -= 1;
}
_ => {}
@ -385,14 +398,14 @@ fn handle_announce_request(
offer_id: offer.offer_id,
};
out_messages.push((
offer_receiver.connection_meta,
OutMessage::Offer(middleman_offer),
));
::log::trace!(
"sending middleman offer to {:?}",
offer_receiver.connection_meta
);
let meta = OutMessageMeta {
out_message_consumer_id: offer_receiver.consumer_id,
connection_id: offer_receiver.connection_id,
pending_scrape_id: None,
};
out_messages.push((meta, OutMessage::Offer(middleman_offer)));
::log::trace!("sending middleman offer to {:?}", meta);
}
}
@ -409,14 +422,14 @@ fn handle_announce_request(
offer_id,
};
out_messages.push((
answer_receiver.connection_meta,
OutMessage::Answer(middleman_answer),
));
::log::trace!(
"sending middleman answer to {:?}",
answer_receiver.connection_meta
);
let meta = OutMessageMeta {
out_message_consumer_id: answer_receiver.consumer_id,
connection_id: answer_receiver.connection_id,
pending_scrape_id: None,
};
out_messages.push((meta, OutMessage::Answer(middleman_answer)));
::log::trace!("sending middleman answer to {:?}", meta);
}
}
@ -428,14 +441,14 @@ fn handle_announce_request(
announce_interval: config.protocol.peer_announce_interval,
});
out_messages.push((request_sender_meta, out_message));
out_messages.push((request_sender_meta.into(), out_message));
}
fn handle_scrape_request(
config: &Config,
torrent_maps: &mut TorrentMaps,
out_messages: &mut Vec<(ConnectionMeta, OutMessage)>,
meta: ConnectionMeta,
out_messages: &mut Vec<(OutMessageMeta, OutMessage)>,
meta: InMessageMeta,
request: ScrapeRequest,
) {
let info_hashes = if let Some(info_hashes) = request.info_hashes {
@ -469,5 +482,5 @@ fn handle_scrape_request(
}
}
out_messages.push((meta, OutMessage::ScrapeResponse(out_message)));
out_messages.push((meta.into(), OutMessage::ScrapeResponse(out_message)));
}