mirror of
https://github.com/YGGverse/aquatic.git
synced 2026-03-31 17:55:36 +00:00
ws: refactor, bug fixes, improvements (#155)
- split swarm worker into two modules - split socket worker into two modules - keep track of which offers peers have sent and only allow matching answers - always clean up after closing connection - use channel for telling connections to close - move some logic into new ConnectionRunner struct - use slotmap for connection reference storage - fix double counting of error responses - actually close connections that take too long to send responses to - remove announced_info_hashes entry on AnnounceEvent::Stopped
This commit is contained in:
parent
af9d5a55f6
commit
fe5ccf6646
19 changed files with 1770 additions and 1583 deletions
13
CHANGELOG.md
13
CHANGELOG.md
|
|
@ -26,11 +26,24 @@
|
||||||
|
|
||||||
* Add support for reporting peer client information
|
* Add support for reporting peer client information
|
||||||
* Reload TLS certificate (and key) on SIGUSR1
|
* Reload TLS certificate (and key) on SIGUSR1
|
||||||
|
* Keep track of which offers peers have sent and only allow matching answers
|
||||||
|
|
||||||
#### Changed
|
#### Changed
|
||||||
|
|
||||||
|
* A response is no longer generated when peers announce with AnnounceEvent::Stopped
|
||||||
* Compiling with SIMD extensions enabled is no longer required, due to the
|
* Compiling with SIMD extensions enabled is no longer required, due to the
|
||||||
addition of runtime detection to simd-json
|
addition of runtime detection to simd-json
|
||||||
|
* Only consider announce and scrape responses as signs of connection still
|
||||||
|
being alive. Previously, all messages sent to peer were considered.
|
||||||
|
* Decrease default max_peer_age and max_connection_idle config values
|
||||||
|
|
||||||
|
#### Fixed
|
||||||
|
|
||||||
|
* Fix bug where clean up after closing connections wasn't always done
|
||||||
|
* Fix double counting of error responses
|
||||||
|
* Actually close connections that are too slow to send responses to
|
||||||
|
* If peers announce with AnnounceEvent::Stopped, allow them to later announce on
|
||||||
|
same torrent with different peer_id
|
||||||
|
|
||||||
## 0.8.0 - 2023-03-17
|
## 0.8.0 - 2023-03-17
|
||||||
|
|
||||||
|
|
|
||||||
11
Cargo.lock
generated
11
Cargo.lock
generated
|
|
@ -321,6 +321,7 @@ dependencies = [
|
||||||
"glommio",
|
"glommio",
|
||||||
"hashbrown 0.14.1",
|
"hashbrown 0.14.1",
|
||||||
"httparse",
|
"httparse",
|
||||||
|
"indexmap 2.0.2",
|
||||||
"log",
|
"log",
|
||||||
"metrics",
|
"metrics",
|
||||||
"metrics-exporter-prometheus",
|
"metrics-exporter-prometheus",
|
||||||
|
|
@ -335,6 +336,7 @@ dependencies = [
|
||||||
"serde",
|
"serde",
|
||||||
"signal-hook",
|
"signal-hook",
|
||||||
"slab",
|
"slab",
|
||||||
|
"slotmap",
|
||||||
"socket2 0.5.4",
|
"socket2 0.5.4",
|
||||||
"tungstenite",
|
"tungstenite",
|
||||||
]
|
]
|
||||||
|
|
@ -2491,6 +2493,15 @@ dependencies = [
|
||||||
"autocfg",
|
"autocfg",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "slotmap"
|
||||||
|
version = "1.0.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342"
|
||||||
|
dependencies = [
|
||||||
|
"version_check",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "smallvec"
|
name = "smallvec"
|
||||||
version = "1.11.1"
|
version = "1.11.1"
|
||||||
|
|
|
||||||
3
TODO.md
3
TODO.md
|
|
@ -3,9 +3,6 @@
|
||||||
## High priority
|
## High priority
|
||||||
|
|
||||||
* aquatic_ws
|
* aquatic_ws
|
||||||
* Store OfferId that a peer has sent out and only allow answers matching
|
|
||||||
them to be sent? HashMap<(OfferId, PeerId), ValidUntil> could work, but
|
|
||||||
not if peers reuse offer ids
|
|
||||||
* Validate SDP data
|
* Validate SDP data
|
||||||
|
|
||||||
## Medium priority
|
## Medium priority
|
||||||
|
|
|
||||||
|
|
@ -156,7 +156,7 @@ pub fn extract_response_peers<K, V, R, F>(
|
||||||
) -> Vec<R>
|
) -> Vec<R>
|
||||||
where
|
where
|
||||||
K: Eq + ::std::hash::Hash,
|
K: Eq + ::std::hash::Hash,
|
||||||
F: Fn(&V) -> R,
|
F: Fn(&K, &V) -> R,
|
||||||
{
|
{
|
||||||
if peer_map.len() <= max_num_peers_to_take + 1 {
|
if peer_map.len() <= max_num_peers_to_take + 1 {
|
||||||
// This branch: number of peers in map (minus sender peer) is less than
|
// This branch: number of peers in map (minus sender peer) is less than
|
||||||
|
|
@ -165,7 +165,7 @@ where
|
||||||
let mut peers = Vec::with_capacity(peer_map.len());
|
let mut peers = Vec::with_capacity(peer_map.len());
|
||||||
|
|
||||||
peers.extend(peer_map.iter().filter_map(|(k, v)| {
|
peers.extend(peer_map.iter().filter_map(|(k, v)| {
|
||||||
(*k != sender_peer_map_key).then_some(peer_conversion_function(v))
|
(*k != sender_peer_map_key).then_some(peer_conversion_function(k, v))
|
||||||
}));
|
}));
|
||||||
|
|
||||||
// Handle the case when sender peer is not in peer list. Typically,
|
// Handle the case when sender peer is not in peer list. Typically,
|
||||||
|
|
@ -204,12 +204,12 @@ where
|
||||||
|
|
||||||
if let Some(slice) = peer_map.get_range(offset_half_one..end_half_one) {
|
if let Some(slice) = peer_map.get_range(offset_half_one..end_half_one) {
|
||||||
peers.extend(slice.iter().filter_map(|(k, v)| {
|
peers.extend(slice.iter().filter_map(|(k, v)| {
|
||||||
(*k != sender_peer_map_key).then_some(peer_conversion_function(v))
|
(*k != sender_peer_map_key).then_some(peer_conversion_function(k, v))
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
if let Some(slice) = peer_map.get_range(offset_half_two..end_half_two) {
|
if let Some(slice) = peer_map.get_range(offset_half_two..end_half_two) {
|
||||||
peers.extend(slice.iter().filter_map(|(k, v)| {
|
peers.extend(slice.iter().filter_map(|(k, v)| {
|
||||||
(*k != sender_peer_map_key).then_some(peer_conversion_function(v))
|
(*k != sender_peer_map_key).then_some(peer_conversion_function(k, v))
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -260,7 +260,7 @@ mod tests {
|
||||||
&peer_map,
|
&peer_map,
|
||||||
max_num_peers_to_take,
|
max_num_peers_to_take,
|
||||||
sender_peer_map_key,
|
sender_peer_map_key,
|
||||||
|p| *p,
|
|_, p| *p,
|
||||||
);
|
);
|
||||||
|
|
||||||
if num_peers_in_map > max_num_peers_to_take + 1 {
|
if num_peers_in_map > max_num_peers_to_take + 1 {
|
||||||
|
|
|
||||||
|
|
@ -79,10 +79,10 @@ pub struct Peer<I: Ip> {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<I: Ip> Peer<I> {
|
impl<I: Ip> Peer<I> {
|
||||||
pub fn to_response_peer(&self) -> ResponsePeer<I> {
|
pub fn to_response_peer(_: &PeerMapKey<I>, peer: &Self) -> ResponsePeer<I> {
|
||||||
ResponsePeer {
|
ResponsePeer {
|
||||||
ip_address: self.ip_address,
|
ip_address: peer.ip_address,
|
||||||
port: self.port,
|
port: peer.port,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -30,10 +30,10 @@ struct Peer<I: Ip> {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<I: Ip> Peer<I> {
|
impl<I: Ip> Peer<I> {
|
||||||
fn to_response_peer(&self) -> ResponsePeer<I> {
|
fn to_response_peer(_: &PeerId, peer: &Self) -> ResponsePeer<I> {
|
||||||
ResponsePeer {
|
ResponsePeer {
|
||||||
ip_address: self.ip_address,
|
ip_address: peer.ip_address,
|
||||||
port: self.port,
|
port: peer.port,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -352,7 +352,7 @@ mod tests {
|
||||||
|
|
||||||
if i == 0 {
|
if i == 0 {
|
||||||
opt_sender_key = Some(key);
|
opt_sender_key = Some(key);
|
||||||
opt_sender_peer = Some(peer.to_response_peer());
|
opt_sender_peer = Some(Peer::to_response_peer(&key, &peer));
|
||||||
}
|
}
|
||||||
|
|
||||||
peer_map.insert(key, peer);
|
peer_map.insert(key, peer);
|
||||||
|
|
|
||||||
|
|
@ -38,6 +38,7 @@ futures-rustls = "0.24"
|
||||||
glommio = "0.8"
|
glommio = "0.8"
|
||||||
hashbrown = { version = "0.14", features = ["serde"] }
|
hashbrown = { version = "0.14", features = ["serde"] }
|
||||||
httparse = "1"
|
httparse = "1"
|
||||||
|
indexmap = "2"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
metrics = { version = "0.21", optional = true }
|
metrics = { version = "0.21", optional = true }
|
||||||
metrics-util = { version = "0.15", optional = true }
|
metrics-util = { version = "0.15", optional = true }
|
||||||
|
|
@ -50,6 +51,7 @@ rustls-pemfile = "1"
|
||||||
serde = { version = "1", features = ["derive"] }
|
serde = { version = "1", features = ["derive"] }
|
||||||
signal-hook = { version = "0.3" }
|
signal-hook = { version = "0.3" }
|
||||||
slab = "0.4"
|
slab = "0.4"
|
||||||
|
slotmap = "1"
|
||||||
socket2 = { version = "0.5", features = ["all"] }
|
socket2 = { version = "0.5", features = ["all"] }
|
||||||
tungstenite = "0.20"
|
tungstenite = "0.20"
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -34,8 +34,9 @@ pub struct PendingScrapeId(pub u8);
|
||||||
#[derive(Copy, Clone, Debug)]
|
#[derive(Copy, Clone, Debug)]
|
||||||
pub struct ConsumerId(pub u8);
|
pub struct ConsumerId(pub u8);
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
slotmap::new_key_type! {
|
||||||
pub struct ConnectionId(pub usize);
|
pub struct ConnectionId;
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy, Debug)]
|
#[derive(Clone, Copy, Debug)]
|
||||||
pub struct InMessageMeta {
|
pub struct InMessageMeta {
|
||||||
|
|
|
||||||
|
|
@ -154,6 +154,8 @@ pub struct CleaningConfig {
|
||||||
pub torrent_cleaning_interval: u64,
|
pub torrent_cleaning_interval: u64,
|
||||||
/// Remove peers that have not announced for this long (seconds)
|
/// Remove peers that have not announced for this long (seconds)
|
||||||
pub max_peer_age: u32,
|
pub max_peer_age: u32,
|
||||||
|
/// Require that offers are answered to withing this period (seconds)
|
||||||
|
pub max_offer_age: u32,
|
||||||
// Clean connections this often (seconds)
|
// Clean connections this often (seconds)
|
||||||
pub connection_cleaning_interval: u64,
|
pub connection_cleaning_interval: u64,
|
||||||
/// Close connections if no responses have been sent to them for this long (seconds)
|
/// Close connections if no responses have been sent to them for this long (seconds)
|
||||||
|
|
@ -169,8 +171,9 @@ impl Default for CleaningConfig {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
torrent_cleaning_interval: 30,
|
torrent_cleaning_interval: 30,
|
||||||
max_peer_age: 1800,
|
max_peer_age: 180,
|
||||||
max_connection_idle: 60 * 5,
|
max_offer_age: 120,
|
||||||
|
max_connection_idle: 180,
|
||||||
connection_cleaning_interval: 30,
|
connection_cleaning_interval: 30,
|
||||||
close_after_tls_update_grace_period: 60 * 60 * 60,
|
close_after_tls_update_grace_period: 60 * 60 * 60,
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -71,7 +71,7 @@ pub fn run(config: Config) -> ::anyhow::Result<()> {
|
||||||
|
|
||||||
let request_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE);
|
let request_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE);
|
||||||
let response_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE * 16);
|
let response_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE * 16);
|
||||||
let control_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE);
|
let control_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE * 16);
|
||||||
|
|
||||||
let (sentinel_watcher, sentinel) = PanicSentinelWatcher::create_with_sentinel();
|
let (sentinel_watcher, sentinel) = PanicSentinelWatcher::create_with_sentinel();
|
||||||
let priv_dropper = PrivilegeDropper::new(config.privileges.clone(), config.socket_workers);
|
let priv_dropper = PrivilegeDropper::new(config.privileges.clone(), config.socket_workers);
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load diff
667
crates/ws/src/workers/socket/connection.rs
Normal file
667
crates/ws/src/workers/socket/connection.rs
Normal file
|
|
@ -0,0 +1,667 @@
|
||||||
|
use std::borrow::Cow;
|
||||||
|
use std::cell::RefCell;
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::rc::Rc;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
|
||||||
|
use aquatic_common::rustls_config::RustlsConfig;
|
||||||
|
use aquatic_common::ServerStartInstant;
|
||||||
|
use aquatic_peer_id::PeerClient;
|
||||||
|
use aquatic_ws_protocol::*;
|
||||||
|
use arc_swap::ArcSwap;
|
||||||
|
use async_tungstenite::WebSocketStream;
|
||||||
|
use futures::stream::{SplitSink, SplitStream};
|
||||||
|
use futures::{AsyncWriteExt, StreamExt};
|
||||||
|
use futures_lite::future::race;
|
||||||
|
use futures_rustls::TlsAcceptor;
|
||||||
|
use glommio::channels::channel_mesh::Senders;
|
||||||
|
use glommio::channels::local_channel::{LocalReceiver, LocalSender};
|
||||||
|
use glommio::net::TcpStream;
|
||||||
|
use glommio::timer::{sleep, timeout};
|
||||||
|
use glommio::{enclose, prelude::*};
|
||||||
|
use hashbrown::hash_map::Entry;
|
||||||
|
use hashbrown::HashMap;
|
||||||
|
use slab::Slab;
|
||||||
|
|
||||||
|
use crate::common::*;
|
||||||
|
use crate::config::Config;
|
||||||
|
use crate::workers::socket::calculate_in_message_consumer_index;
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
use crate::workers::socket::{ip_version_to_metrics_str, WORKER_INDEX};
|
||||||
|
|
||||||
|
pub struct ConnectionRunner {
|
||||||
|
pub config: Rc<Config>,
|
||||||
|
pub access_list: Arc<AccessListArcSwap>,
|
||||||
|
pub in_message_senders: Rc<Senders<(InMessageMeta, InMessage)>>,
|
||||||
|
pub tq_prioritized: TaskQueueHandle,
|
||||||
|
pub tq_regular: TaskQueueHandle,
|
||||||
|
pub connection_valid_until: Rc<RefCell<ValidUntil>>,
|
||||||
|
pub out_message_sender: Rc<LocalSender<(OutMessageMeta, OutMessage)>>,
|
||||||
|
pub out_message_receiver: LocalReceiver<(OutMessageMeta, OutMessage)>,
|
||||||
|
pub close_conn_receiver: LocalReceiver<()>,
|
||||||
|
pub server_start_instant: ServerStartInstant,
|
||||||
|
pub out_message_consumer_id: ConsumerId,
|
||||||
|
pub connection_id: ConnectionId,
|
||||||
|
pub opt_tls_config: Option<Arc<ArcSwap<RustlsConfig>>>,
|
||||||
|
pub ip_version: IpVersion,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ConnectionRunner {
|
||||||
|
pub async fn run(
|
||||||
|
self,
|
||||||
|
control_message_senders: Rc<Senders<SwarmControlMessage>>,
|
||||||
|
stream: TcpStream,
|
||||||
|
) {
|
||||||
|
let clean_up_data = ConnectionCleanupData {
|
||||||
|
announced_info_hashes: Default::default(),
|
||||||
|
ip_version: self.ip_version,
|
||||||
|
opt_peer_client: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
clean_up_data.before_open();
|
||||||
|
|
||||||
|
let config = self.config.clone();
|
||||||
|
|
||||||
|
if let Err(err) = self.run_inner(clean_up_data.clone(), stream).await {
|
||||||
|
::log::debug!("connection error: {:#}", err);
|
||||||
|
}
|
||||||
|
|
||||||
|
clean_up_data
|
||||||
|
.after_close(&config, control_message_senders)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_inner(
|
||||||
|
self,
|
||||||
|
clean_up_data: ConnectionCleanupData,
|
||||||
|
mut stream: TcpStream,
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
if let Some(tls_config) = self.opt_tls_config.as_ref() {
|
||||||
|
let tls_acceptor: TlsAcceptor = tls_config.load_full().into();
|
||||||
|
|
||||||
|
let stream = tls_acceptor.accept(stream).await?;
|
||||||
|
|
||||||
|
self.run_inner_stream_agnostic(clean_up_data, stream).await
|
||||||
|
} else {
|
||||||
|
// Implementing this over TLS is too cumbersome, since the crate used
|
||||||
|
// for TLS streams doesn't support peek and tungstenite doesn't
|
||||||
|
// properly support sending a HTTP error response in accept_hdr
|
||||||
|
// callback.
|
||||||
|
if self.config.network.enable_http_health_checks {
|
||||||
|
let mut peek_buf = [0u8; 11];
|
||||||
|
|
||||||
|
stream
|
||||||
|
.peek(&mut peek_buf)
|
||||||
|
.await
|
||||||
|
.map_err(|err| anyhow::anyhow!("error peeking: {:#}", err))?;
|
||||||
|
|
||||||
|
if &peek_buf == b"GET /health" {
|
||||||
|
stream
|
||||||
|
.write_all(b"HTTP/1.1 200 Ok\r\nContent-Length: 2\r\n\r\nOk")
|
||||||
|
.await
|
||||||
|
.map_err(|err| {
|
||||||
|
anyhow::anyhow!("error sending health check response: {:#}", err)
|
||||||
|
})?;
|
||||||
|
stream.flush().await.map_err(|err| {
|
||||||
|
anyhow::anyhow!("error flushing health check response: {:#}", err)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
return Err(anyhow::anyhow!(
|
||||||
|
"client requested health check, skipping websocket negotiation"
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
self.run_inner_stream_agnostic(clean_up_data, stream).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_inner_stream_agnostic<S>(
|
||||||
|
self,
|
||||||
|
clean_up_data: ConnectionCleanupData,
|
||||||
|
stream: S,
|
||||||
|
) -> anyhow::Result<()>
|
||||||
|
where
|
||||||
|
S: futures::AsyncRead + futures::AsyncWrite + Unpin + 'static,
|
||||||
|
{
|
||||||
|
let ws_config = tungstenite::protocol::WebSocketConfig {
|
||||||
|
max_frame_size: Some(self.config.network.websocket_max_frame_size),
|
||||||
|
max_message_size: Some(self.config.network.websocket_max_message_size),
|
||||||
|
write_buffer_size: self.config.network.websocket_write_buffer_size,
|
||||||
|
max_write_buffer_size: self.config.network.websocket_write_buffer_size * 3,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
let stream = async_tungstenite::accept_async_with_config(stream, Some(ws_config)).await?;
|
||||||
|
|
||||||
|
let (ws_out, ws_in) = futures::StreamExt::split(stream);
|
||||||
|
|
||||||
|
let pending_scrape_slab = Rc::new(RefCell::new(Slab::new()));
|
||||||
|
let access_list_cache = create_access_list_cache(&self.access_list);
|
||||||
|
|
||||||
|
let config = self.config.clone();
|
||||||
|
|
||||||
|
let reader_handle = spawn_local_into(
|
||||||
|
enclose!((pending_scrape_slab, clean_up_data) async move {
|
||||||
|
let mut reader = ConnectionReader {
|
||||||
|
config: self.config.clone(),
|
||||||
|
access_list_cache,
|
||||||
|
in_message_senders: self.in_message_senders,
|
||||||
|
out_message_sender: self.out_message_sender,
|
||||||
|
pending_scrape_slab,
|
||||||
|
out_message_consumer_id: self.out_message_consumer_id,
|
||||||
|
ws_in,
|
||||||
|
ip_version: self.ip_version,
|
||||||
|
connection_id: self.connection_id,
|
||||||
|
clean_up_data: clean_up_data.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
reader.run_in_message_loop().await
|
||||||
|
}),
|
||||||
|
self.tq_regular,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let writer_handle = spawn_local_into(
|
||||||
|
async move {
|
||||||
|
let mut writer = ConnectionWriter {
|
||||||
|
config,
|
||||||
|
out_message_receiver: self.out_message_receiver,
|
||||||
|
connection_valid_until: self.connection_valid_until,
|
||||||
|
ws_out,
|
||||||
|
pending_scrape_slab,
|
||||||
|
server_start_instant: self.server_start_instant,
|
||||||
|
ip_version: self.ip_version,
|
||||||
|
clean_up_data,
|
||||||
|
};
|
||||||
|
|
||||||
|
writer.run_out_message_loop().await
|
||||||
|
},
|
||||||
|
self.tq_prioritized,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let close_conn_future = spawn_local_into(
|
||||||
|
async move {
|
||||||
|
self.close_conn_receiver.recv().await;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
},
|
||||||
|
self.tq_prioritized,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
race(close_conn_future, race(reader_handle, writer_handle)).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ConnectionReader<S> {
|
||||||
|
config: Rc<Config>,
|
||||||
|
access_list_cache: AccessListCache,
|
||||||
|
in_message_senders: Rc<Senders<(InMessageMeta, InMessage)>>,
|
||||||
|
out_message_sender: Rc<LocalSender<(OutMessageMeta, OutMessage)>>,
|
||||||
|
pending_scrape_slab: Rc<RefCell<Slab<PendingScrapeResponse>>>,
|
||||||
|
out_message_consumer_id: ConsumerId,
|
||||||
|
ws_in: SplitStream<WebSocketStream<S>>,
|
||||||
|
ip_version: IpVersion,
|
||||||
|
connection_id: ConnectionId,
|
||||||
|
clean_up_data: ConnectionCleanupData,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionReader<S> {
|
||||||
|
async fn run_in_message_loop(&mut self) -> anyhow::Result<()> {
|
||||||
|
loop {
|
||||||
|
while self.out_message_sender.is_full() {
|
||||||
|
sleep(Duration::from_millis(100)).await;
|
||||||
|
|
||||||
|
yield_if_needed().await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let message = self
|
||||||
|
.ws_in
|
||||||
|
.next()
|
||||||
|
.await
|
||||||
|
.ok_or_else(|| anyhow::anyhow!("Stream ended"))??;
|
||||||
|
|
||||||
|
match &message {
|
||||||
|
tungstenite::Message::Text(_) | tungstenite::Message::Binary(_) => {
|
||||||
|
match InMessage::from_ws_message(message) {
|
||||||
|
Ok(in_message) => {
|
||||||
|
self.handle_in_message(in_message).await?;
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
::log::debug!("Couldn't parse in_message: {:?}", err);
|
||||||
|
|
||||||
|
self.send_error_response("Invalid request".into(), None, None)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tungstenite::Message::Ping(_) => {
|
||||||
|
::log::trace!("Received ping message");
|
||||||
|
// tungstenite sends a pong response by itself
|
||||||
|
}
|
||||||
|
tungstenite::Message::Pong(_) => {
|
||||||
|
::log::trace!("Received pong message");
|
||||||
|
}
|
||||||
|
tungstenite::Message::Close(_) => {
|
||||||
|
::log::debug!("Client sent close frame");
|
||||||
|
|
||||||
|
break Ok(());
|
||||||
|
}
|
||||||
|
tungstenite::Message::Frame(_) => {
|
||||||
|
::log::warn!("Read raw websocket frame, this should not happen");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
yield_if_needed().await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_in_message(&mut self, in_message: InMessage) -> anyhow::Result<()> {
|
||||||
|
match in_message {
|
||||||
|
InMessage::AnnounceRequest(announce_request) => {
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::increment_counter!(
|
||||||
|
"aquatic_requests_total",
|
||||||
|
"type" => "announce",
|
||||||
|
"ip_version" => ip_version_to_metrics_str(self.ip_version),
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let info_hash = announce_request.info_hash;
|
||||||
|
|
||||||
|
if self
|
||||||
|
.access_list_cache
|
||||||
|
.load()
|
||||||
|
.allows(self.config.access_list.mode, &info_hash.0)
|
||||||
|
{
|
||||||
|
let mut announced_info_hashes =
|
||||||
|
self.clean_up_data.announced_info_hashes.borrow_mut();
|
||||||
|
|
||||||
|
// Store peer id / check if stored peer id matches
|
||||||
|
match announced_info_hashes.entry(announce_request.info_hash) {
|
||||||
|
Entry::Occupied(entry) => {
|
||||||
|
if *entry.get() != announce_request.peer_id {
|
||||||
|
// Drop Rc borrow before awaiting
|
||||||
|
drop(announced_info_hashes);
|
||||||
|
|
||||||
|
self.send_error_response(
|
||||||
|
"Only one peer id can be used per torrent".into(),
|
||||||
|
Some(ErrorResponseAction::Announce),
|
||||||
|
Some(info_hash),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
return Err(anyhow::anyhow!(
|
||||||
|
"Peer used more than one PeerId for a single torrent"
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Entry::Vacant(entry) => {
|
||||||
|
entry.insert(announce_request.peer_id);
|
||||||
|
|
||||||
|
// Set peer client info if not set
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
if self.config.metrics.run_prometheus_endpoint
|
||||||
|
&& self.config.metrics.peer_clients
|
||||||
|
&& self.clean_up_data.opt_peer_client.borrow().is_none()
|
||||||
|
{
|
||||||
|
let peer_id = aquatic_peer_id::PeerId(announce_request.peer_id.0);
|
||||||
|
let client = peer_id.client();
|
||||||
|
let prefix = peer_id.first_8_bytes_hex().to_string();
|
||||||
|
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_peer_clients",
|
||||||
|
1.0,
|
||||||
|
"client" => client.to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
if self.config.metrics.peer_id_prefixes {
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_peer_id_prefixes",
|
||||||
|
1.0,
|
||||||
|
"prefix_hex" => prefix.to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
*self.clean_up_data.opt_peer_client.borrow_mut() =
|
||||||
|
Some((client, prefix));
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(AnnounceEvent::Stopped) = announce_request.event {
|
||||||
|
announced_info_hashes.remove(&announce_request.info_hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drop Rc borrow before awaiting
|
||||||
|
drop(announced_info_hashes);
|
||||||
|
|
||||||
|
let in_message = InMessage::AnnounceRequest(announce_request);
|
||||||
|
|
||||||
|
let consumer_index =
|
||||||
|
calculate_in_message_consumer_index(&self.config, info_hash);
|
||||||
|
|
||||||
|
// Only fails when receiver is closed
|
||||||
|
self.in_message_senders
|
||||||
|
.send_to(
|
||||||
|
consumer_index,
|
||||||
|
(self.make_connection_meta(None), in_message),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
} else {
|
||||||
|
self.send_error_response(
|
||||||
|
"Info hash not allowed".into(),
|
||||||
|
Some(ErrorResponseAction::Announce),
|
||||||
|
Some(info_hash),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
InMessage::ScrapeRequest(ScrapeRequest { info_hashes, .. }) => {
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::increment_counter!(
|
||||||
|
"aquatic_requests_total",
|
||||||
|
"type" => "scrape",
|
||||||
|
"ip_version" => ip_version_to_metrics_str(self.ip_version),
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let info_hashes = if let Some(info_hashes) = info_hashes {
|
||||||
|
info_hashes
|
||||||
|
} else {
|
||||||
|
// If request.info_hashes is empty, don't return scrape for all
|
||||||
|
// torrents, even though reference server does it. It is too expensive.
|
||||||
|
self.send_error_response(
|
||||||
|
"Full scrapes are not allowed".into(),
|
||||||
|
Some(ErrorResponseAction::Scrape),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
return Ok(());
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut info_hashes_by_worker: BTreeMap<usize, Vec<InfoHash>> = BTreeMap::new();
|
||||||
|
|
||||||
|
for info_hash in info_hashes.as_vec() {
|
||||||
|
let info_hashes = info_hashes_by_worker
|
||||||
|
.entry(calculate_in_message_consumer_index(&self.config, info_hash))
|
||||||
|
.or_default();
|
||||||
|
|
||||||
|
info_hashes.push(info_hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
let pending_worker_out_messages = info_hashes_by_worker.len();
|
||||||
|
|
||||||
|
let pending_scrape_response = PendingScrapeResponse {
|
||||||
|
pending_worker_out_messages,
|
||||||
|
stats: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let pending_scrape_id: u8 = self
|
||||||
|
.pending_scrape_slab
|
||||||
|
.borrow_mut()
|
||||||
|
.insert(pending_scrape_response)
|
||||||
|
.try_into()
|
||||||
|
.with_context(|| "Reached 256 pending scrape responses")?;
|
||||||
|
|
||||||
|
let meta = self.make_connection_meta(Some(PendingScrapeId(pending_scrape_id)));
|
||||||
|
|
||||||
|
for (consumer_index, info_hashes) in info_hashes_by_worker {
|
||||||
|
let in_message = InMessage::ScrapeRequest(ScrapeRequest {
|
||||||
|
action: ScrapeAction::Scrape,
|
||||||
|
info_hashes: Some(ScrapeRequestInfoHashes::Multiple(info_hashes)),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Only fails when receiver is closed
|
||||||
|
self.in_message_senders
|
||||||
|
.send_to(consumer_index, (meta, in_message))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn send_error_response(
|
||||||
|
&self,
|
||||||
|
failure_reason: Cow<'static, str>,
|
||||||
|
action: Option<ErrorResponseAction>,
|
||||||
|
info_hash: Option<InfoHash>,
|
||||||
|
) -> anyhow::Result<()> {
|
||||||
|
let out_message = OutMessage::ErrorResponse(ErrorResponse {
|
||||||
|
action,
|
||||||
|
failure_reason,
|
||||||
|
info_hash,
|
||||||
|
});
|
||||||
|
|
||||||
|
self.out_message_sender
|
||||||
|
.send((self.make_connection_meta(None).into(), out_message))
|
||||||
|
.await
|
||||||
|
.map_err(|err| {
|
||||||
|
anyhow::anyhow!("ConnectionReader::send_error_response failed: {:#}", err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_connection_meta(&self, pending_scrape_id: Option<PendingScrapeId>) -> InMessageMeta {
|
||||||
|
InMessageMeta {
|
||||||
|
connection_id: self.connection_id,
|
||||||
|
out_message_consumer_id: self.out_message_consumer_id,
|
||||||
|
ip_version: self.ip_version,
|
||||||
|
pending_scrape_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ConnectionWriter<S> {
|
||||||
|
config: Rc<Config>,
|
||||||
|
out_message_receiver: LocalReceiver<(OutMessageMeta, OutMessage)>,
|
||||||
|
connection_valid_until: Rc<RefCell<ValidUntil>>,
|
||||||
|
ws_out: SplitSink<WebSocketStream<S>, tungstenite::Message>,
|
||||||
|
pending_scrape_slab: Rc<RefCell<Slab<PendingScrapeResponse>>>,
|
||||||
|
server_start_instant: ServerStartInstant,
|
||||||
|
ip_version: IpVersion,
|
||||||
|
clean_up_data: ConnectionCleanupData,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S: futures::AsyncRead + futures::AsyncWrite + Unpin> ConnectionWriter<S> {
|
||||||
|
async fn run_out_message_loop(&mut self) -> anyhow::Result<()> {
|
||||||
|
loop {
|
||||||
|
let (meta, out_message) = self.out_message_receiver.recv().await.ok_or_else(|| {
|
||||||
|
anyhow::anyhow!("ConnectionWriter couldn't receive message, sender is closed")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
match out_message {
|
||||||
|
OutMessage::ScrapeResponse(out_message) => {
|
||||||
|
let pending_scrape_id = meta
|
||||||
|
.pending_scrape_id
|
||||||
|
.expect("meta.pending_scrape_id not set");
|
||||||
|
|
||||||
|
let finished = if let Some(pending) = Slab::get_mut(
|
||||||
|
&mut RefCell::borrow_mut(&self.pending_scrape_slab),
|
||||||
|
pending_scrape_id.0 as usize,
|
||||||
|
) {
|
||||||
|
pending.stats.extend(out_message.files);
|
||||||
|
pending.pending_worker_out_messages -= 1;
|
||||||
|
|
||||||
|
pending.pending_worker_out_messages == 0
|
||||||
|
} else {
|
||||||
|
return Err(anyhow::anyhow!("pending scrape not found in slab"));
|
||||||
|
};
|
||||||
|
|
||||||
|
if finished {
|
||||||
|
let out_message = {
|
||||||
|
let mut slab = RefCell::borrow_mut(&self.pending_scrape_slab);
|
||||||
|
|
||||||
|
let pending = slab.remove(pending_scrape_id.0 as usize);
|
||||||
|
|
||||||
|
slab.shrink_to_fit();
|
||||||
|
|
||||||
|
OutMessage::ScrapeResponse(ScrapeResponse {
|
||||||
|
action: ScrapeAction::Scrape,
|
||||||
|
files: pending.stats,
|
||||||
|
})
|
||||||
|
};
|
||||||
|
|
||||||
|
self.send_out_message(&out_message).await?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
out_message => {
|
||||||
|
self.send_out_message(&out_message).await?;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn send_out_message(&mut self, out_message: &OutMessage) -> anyhow::Result<()> {
|
||||||
|
let result = timeout(Duration::from_secs(10), async {
|
||||||
|
let result =
|
||||||
|
futures::SinkExt::send(&mut self.ws_out, out_message.to_ws_message()).await;
|
||||||
|
|
||||||
|
Ok(result)
|
||||||
|
})
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(Ok(())) => {
|
||||||
|
if let OutMessage::AnnounceResponse(_) | OutMessage::ScrapeResponse(_) = out_message
|
||||||
|
{
|
||||||
|
*self.connection_valid_until.borrow_mut() = ValidUntil::new(
|
||||||
|
self.server_start_instant,
|
||||||
|
self.config.cleaning.max_connection_idle,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
{
|
||||||
|
let out_message_type = match &out_message {
|
||||||
|
OutMessage::OfferOutMessage(_) => "offer",
|
||||||
|
OutMessage::AnswerOutMessage(_) => "offer_answer",
|
||||||
|
OutMessage::AnnounceResponse(_) => "announce",
|
||||||
|
OutMessage::ScrapeResponse(_) => "scrape",
|
||||||
|
OutMessage::ErrorResponse(_) => "error",
|
||||||
|
};
|
||||||
|
|
||||||
|
::metrics::increment_counter!(
|
||||||
|
"aquatic_responses_total",
|
||||||
|
"type" => out_message_type,
|
||||||
|
"ip_version" => ip_version_to_metrics_str(self.ip_version),
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Some((peer_client, prefix)) =
|
||||||
|
self.clean_up_data.opt_peer_client.borrow().as_ref()
|
||||||
|
{
|
||||||
|
// As long as connection is still alive, increment peer client
|
||||||
|
// gauges by zero to prevent them from being removed due to
|
||||||
|
// idleness
|
||||||
|
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_peer_clients",
|
||||||
|
0.0,
|
||||||
|
"client" => peer_client.to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
if self.config.metrics.peer_id_prefixes {
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_peer_id_prefixes",
|
||||||
|
0.0,
|
||||||
|
"prefix_hex" => prefix.to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Ok(Err(err)) => Err(err.into()),
|
||||||
|
Err(err) => Err(anyhow::anyhow!(
|
||||||
|
"send_out_message: sending to peer took too long: {}",
|
||||||
|
err
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Data stored with connection needed for cleanup after it closes
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct ConnectionCleanupData {
|
||||||
|
announced_info_hashes: Rc<RefCell<HashMap<InfoHash, PeerId>>>,
|
||||||
|
ip_version: IpVersion,
|
||||||
|
opt_peer_client: Rc<RefCell<Option<(PeerClient, String)>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ConnectionCleanupData {
|
||||||
|
fn before_open(&self) {
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_active_connections",
|
||||||
|
1.0,
|
||||||
|
"ip_version" => ip_version_to_metrics_str(self.ip_version),
|
||||||
|
"worker_index" => WORKER_INDEX.get().to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
async fn after_close(
|
||||||
|
&self,
|
||||||
|
config: &Config,
|
||||||
|
control_message_senders: Rc<Senders<SwarmControlMessage>>,
|
||||||
|
) {
|
||||||
|
// Use RefCell::take to avoid issues with Rc borrow across await
|
||||||
|
let announced_info_hashes = self.announced_info_hashes.take();
|
||||||
|
|
||||||
|
// Tell swarm workers to remove peer
|
||||||
|
for (info_hash, peer_id) in announced_info_hashes.into_iter() {
|
||||||
|
let message = SwarmControlMessage::ConnectionClosed {
|
||||||
|
info_hash,
|
||||||
|
peer_id,
|
||||||
|
ip_version: self.ip_version,
|
||||||
|
};
|
||||||
|
|
||||||
|
let consumer_index = calculate_in_message_consumer_index(&config, info_hash);
|
||||||
|
|
||||||
|
control_message_senders
|
||||||
|
.send_to(consumer_index, message)
|
||||||
|
.await
|
||||||
|
.expect("control message receiver open");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
{
|
||||||
|
::metrics::decrement_gauge!(
|
||||||
|
"aquatic_active_connections",
|
||||||
|
1.0,
|
||||||
|
"ip_version" => ip_version_to_metrics_str(self.ip_version),
|
||||||
|
"worker_index" => WORKER_INDEX.get().to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Some((peer_client, prefix)) = self.opt_peer_client.borrow().as_ref() {
|
||||||
|
::metrics::decrement_gauge!(
|
||||||
|
"aquatic_peer_clients",
|
||||||
|
1.0,
|
||||||
|
"client" => peer_client.to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
if config.metrics.peer_id_prefixes {
|
||||||
|
::metrics::decrement_gauge!(
|
||||||
|
"aquatic_peer_id_prefixes",
|
||||||
|
1.0,
|
||||||
|
"prefix_hex" => prefix.to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct PendingScrapeResponse {
|
||||||
|
pending_worker_out_messages: usize,
|
||||||
|
stats: HashMap<InfoHash, ScrapeStatistics>,
|
||||||
|
}
|
||||||
362
crates/ws/src/workers/socket/mod.rs
Normal file
362
crates/ws/src/workers/socket/mod.rs
Normal file
|
|
@ -0,0 +1,362 @@
|
||||||
|
use std::cell::RefCell;
|
||||||
|
use std::os::unix::prelude::{FromRawFd, IntoRawFd};
|
||||||
|
use std::rc::Rc;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
|
use aquatic_common::privileges::PrivilegeDropper;
|
||||||
|
use aquatic_common::rustls_config::RustlsConfig;
|
||||||
|
use aquatic_common::{PanicSentinel, ServerStartInstant};
|
||||||
|
use aquatic_ws_protocol::*;
|
||||||
|
use arc_swap::ArcSwap;
|
||||||
|
use futures::StreamExt;
|
||||||
|
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role};
|
||||||
|
use glommio::channels::local_channel::{new_bounded, LocalSender};
|
||||||
|
use glommio::channels::shared_channel::ConnectedReceiver;
|
||||||
|
use glommio::net::TcpListener;
|
||||||
|
use glommio::timer::TimerActionRepeat;
|
||||||
|
use glommio::{enclose, prelude::*};
|
||||||
|
use slotmap::HopSlotMap;
|
||||||
|
|
||||||
|
use crate::config::Config;
|
||||||
|
|
||||||
|
use crate::common::*;
|
||||||
|
use crate::workers::socket::connection::ConnectionRunner;
|
||||||
|
|
||||||
|
mod connection;
|
||||||
|
|
||||||
|
type ConnectionHandles = HopSlotMap<ConnectionId, ConnectionHandle>;
|
||||||
|
|
||||||
|
const LOCAL_CHANNEL_SIZE: usize = 16;
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
thread_local! { static WORKER_INDEX: ::std::cell::Cell<usize> = Default::default() }
|
||||||
|
|
||||||
|
/// Used to interact with the connection tasks
|
||||||
|
struct ConnectionHandle {
|
||||||
|
close_conn_sender: LocalSender<()>,
|
||||||
|
/// Sender part of channel used to pass on outgoing messages from request
|
||||||
|
/// worker
|
||||||
|
out_message_sender: Rc<LocalSender<(OutMessageMeta, OutMessage)>>,
|
||||||
|
/// Updated after sending message to peer
|
||||||
|
valid_until: Rc<RefCell<ValidUntil>>,
|
||||||
|
/// The TLS config used for this connection
|
||||||
|
opt_tls_config: Option<Arc<RustlsConfig>>,
|
||||||
|
valid_until_after_tls_update: Option<ValidUntil>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn run_socket_worker(
|
||||||
|
_sentinel: PanicSentinel,
|
||||||
|
config: Config,
|
||||||
|
state: State,
|
||||||
|
opt_tls_config: Option<Arc<ArcSwap<RustlsConfig>>>,
|
||||||
|
control_message_mesh_builder: MeshBuilder<SwarmControlMessage, Partial>,
|
||||||
|
in_message_mesh_builder: MeshBuilder<(InMessageMeta, InMessage), Partial>,
|
||||||
|
out_message_mesh_builder: MeshBuilder<(OutMessageMeta, OutMessage), Partial>,
|
||||||
|
priv_dropper: PrivilegeDropper,
|
||||||
|
server_start_instant: ServerStartInstant,
|
||||||
|
worker_index: usize,
|
||||||
|
) {
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
WORKER_INDEX.with(|index| index.set(worker_index));
|
||||||
|
|
||||||
|
let config = Rc::new(config);
|
||||||
|
let access_list = state.access_list;
|
||||||
|
|
||||||
|
let listener = create_tcp_listener(&config, priv_dropper).expect("create tcp listener");
|
||||||
|
|
||||||
|
::log::info!("created tcp listener");
|
||||||
|
|
||||||
|
let (control_message_senders, _) = control_message_mesh_builder
|
||||||
|
.join(Role::Producer)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let control_message_senders = Rc::new(control_message_senders);
|
||||||
|
|
||||||
|
let (in_message_senders, _) = in_message_mesh_builder.join(Role::Producer).await.unwrap();
|
||||||
|
let in_message_senders = Rc::new(in_message_senders);
|
||||||
|
|
||||||
|
let tq_prioritized = executor().create_task_queue(
|
||||||
|
Shares::Static(100),
|
||||||
|
Latency::Matters(Duration::from_millis(1)),
|
||||||
|
"prioritized",
|
||||||
|
);
|
||||||
|
let tq_regular =
|
||||||
|
executor().create_task_queue(Shares::Static(1), Latency::NotImportant, "regular");
|
||||||
|
|
||||||
|
let (_, mut out_message_receivers) =
|
||||||
|
out_message_mesh_builder.join(Role::Consumer).await.unwrap();
|
||||||
|
let out_message_consumer_id = ConsumerId(
|
||||||
|
out_message_receivers
|
||||||
|
.consumer_id()
|
||||||
|
.unwrap()
|
||||||
|
.try_into()
|
||||||
|
.unwrap(),
|
||||||
|
);
|
||||||
|
|
||||||
|
::log::info!("joined channels");
|
||||||
|
|
||||||
|
let connection_handles = Rc::new(RefCell::new(ConnectionHandles::default()));
|
||||||
|
|
||||||
|
// Periodically clean connections
|
||||||
|
TimerActionRepeat::repeat_into(
|
||||||
|
enclose!((config, connection_handles, opt_tls_config) move || {
|
||||||
|
clean_connections(
|
||||||
|
config.clone(),
|
||||||
|
connection_handles.clone(),
|
||||||
|
server_start_instant,
|
||||||
|
opt_tls_config.clone(),
|
||||||
|
)
|
||||||
|
}),
|
||||||
|
tq_prioritized,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
for (_, out_message_receiver) in out_message_receivers.streams() {
|
||||||
|
spawn_local_into(
|
||||||
|
receive_out_messages(out_message_receiver, connection_handles.clone()),
|
||||||
|
tq_regular,
|
||||||
|
)
|
||||||
|
.unwrap()
|
||||||
|
.detach();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut incoming = listener.incoming();
|
||||||
|
|
||||||
|
while let Some(stream) = incoming.next().await {
|
||||||
|
match stream {
|
||||||
|
Err(err) => {
|
||||||
|
::log::error!("accept connection: {:#}", err);
|
||||||
|
}
|
||||||
|
Ok(stream) => {
|
||||||
|
let ip_version = match stream.peer_addr() {
|
||||||
|
Ok(addr) => IpVersion::canonical_from_ip(addr.ip()),
|
||||||
|
Err(err) => {
|
||||||
|
::log::info!("could not extract ip version (v4 or v6): {:#}", err);
|
||||||
|
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let (out_message_sender, out_message_receiver) = new_bounded(LOCAL_CHANNEL_SIZE);
|
||||||
|
let out_message_sender = Rc::new(out_message_sender);
|
||||||
|
|
||||||
|
let (close_conn_sender, close_conn_receiver) = new_bounded(1);
|
||||||
|
|
||||||
|
let connection_valid_until = Rc::new(RefCell::new(ValidUntil::new(
|
||||||
|
server_start_instant,
|
||||||
|
config.cleaning.max_connection_idle,
|
||||||
|
)));
|
||||||
|
|
||||||
|
let connection_handle = ConnectionHandle {
|
||||||
|
close_conn_sender,
|
||||||
|
out_message_sender: out_message_sender.clone(),
|
||||||
|
valid_until: connection_valid_until.clone(),
|
||||||
|
opt_tls_config: opt_tls_config.as_ref().map(|c| c.load_full()),
|
||||||
|
valid_until_after_tls_update: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
let connection_id = connection_handles.borrow_mut().insert(connection_handle);
|
||||||
|
|
||||||
|
spawn_local_into(
|
||||||
|
enclose!((
|
||||||
|
config,
|
||||||
|
access_list,
|
||||||
|
in_message_senders,
|
||||||
|
connection_valid_until,
|
||||||
|
opt_tls_config,
|
||||||
|
control_message_senders,
|
||||||
|
connection_handles
|
||||||
|
) async move {
|
||||||
|
let runner = ConnectionRunner {
|
||||||
|
config,
|
||||||
|
access_list,
|
||||||
|
in_message_senders,
|
||||||
|
tq_prioritized,
|
||||||
|
tq_regular,
|
||||||
|
connection_valid_until,
|
||||||
|
out_message_sender,
|
||||||
|
out_message_receiver,
|
||||||
|
close_conn_receiver,
|
||||||
|
server_start_instant,
|
||||||
|
out_message_consumer_id,
|
||||||
|
connection_id,
|
||||||
|
opt_tls_config,
|
||||||
|
ip_version
|
||||||
|
};
|
||||||
|
|
||||||
|
runner.run(control_message_senders, stream).await;
|
||||||
|
|
||||||
|
connection_handles.borrow_mut().remove(connection_id);
|
||||||
|
}),
|
||||||
|
tq_regular,
|
||||||
|
)
|
||||||
|
.unwrap()
|
||||||
|
.detach();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn clean_connections(
|
||||||
|
config: Rc<Config>,
|
||||||
|
connection_slab: Rc<RefCell<ConnectionHandles>>,
|
||||||
|
server_start_instant: ServerStartInstant,
|
||||||
|
opt_tls_config: Option<Arc<ArcSwap<RustlsConfig>>>,
|
||||||
|
) -> Option<Duration> {
|
||||||
|
let now = server_start_instant.seconds_elapsed();
|
||||||
|
let opt_current_tls_config = opt_tls_config.map(|c| c.load_full());
|
||||||
|
|
||||||
|
connection_slab.borrow_mut().retain(|_, reference| {
|
||||||
|
let mut keep = true;
|
||||||
|
|
||||||
|
// Handle case when connection runs on an old TLS certificate
|
||||||
|
if let Some(valid_until) = reference.valid_until_after_tls_update {
|
||||||
|
if !valid_until.valid(now) {
|
||||||
|
keep = false;
|
||||||
|
}
|
||||||
|
} else if let Some(false) = opt_current_tls_config
|
||||||
|
.as_ref()
|
||||||
|
.zip(reference.opt_tls_config.as_ref())
|
||||||
|
.map(|(a, b)| Arc::ptr_eq(a, b))
|
||||||
|
{
|
||||||
|
reference.valid_until_after_tls_update = Some(ValidUntil::new(
|
||||||
|
server_start_instant,
|
||||||
|
config.cleaning.close_after_tls_update_grace_period,
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
keep &= reference.valid_until.borrow().valid(now);
|
||||||
|
|
||||||
|
if keep {
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
if let Err(err) = reference.close_conn_sender.try_send(()) {
|
||||||
|
::log::info!("couldn't tell connection to close: {:#}", err);
|
||||||
|
}
|
||||||
|
|
||||||
|
false
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
::log::info!(
|
||||||
|
"cleaned connections in worker {}, {} references remaining",
|
||||||
|
WORKER_INDEX.get(),
|
||||||
|
connection_slab.borrow_mut().len()
|
||||||
|
);
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
{
|
||||||
|
// Increment gauges by zero to prevent them from being removed due to
|
||||||
|
// idleness
|
||||||
|
|
||||||
|
let worker_index = WORKER_INDEX.with(|index| index.get()).to_string();
|
||||||
|
|
||||||
|
if config.network.address.is_ipv4() || !config.network.only_ipv6 {
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_active_connections",
|
||||||
|
0.0,
|
||||||
|
"ip_version" => "4",
|
||||||
|
"worker_index" => worker_index.clone(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if config.network.address.is_ipv6() {
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_active_connections",
|
||||||
|
0.0,
|
||||||
|
"ip_version" => "6",
|
||||||
|
"worker_index" => worker_index,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Duration::from_secs(
|
||||||
|
config.cleaning.connection_cleaning_interval,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn receive_out_messages(
|
||||||
|
mut out_message_receiver: ConnectedReceiver<(OutMessageMeta, OutMessage)>,
|
||||||
|
connection_references: Rc<RefCell<ConnectionHandles>>,
|
||||||
|
) {
|
||||||
|
let connection_references = &connection_references;
|
||||||
|
|
||||||
|
while let Some((meta, out_message)) = out_message_receiver.next().await {
|
||||||
|
if let Some(reference) = connection_references.borrow().get(meta.connection_id) {
|
||||||
|
match reference.out_message_sender.try_send((meta, out_message)) {
|
||||||
|
Ok(()) => {}
|
||||||
|
Err(GlommioError::Closed(_)) => {}
|
||||||
|
Err(GlommioError::WouldBlock(_)) => {}
|
||||||
|
Err(err) => {
|
||||||
|
::log::debug!(
|
||||||
|
"Couldn't send out_message from shared channel to local receiver: {:?}",
|
||||||
|
err
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn create_tcp_listener(
|
||||||
|
config: &Config,
|
||||||
|
priv_dropper: PrivilegeDropper,
|
||||||
|
) -> anyhow::Result<TcpListener> {
|
||||||
|
let domain = if config.network.address.is_ipv4() {
|
||||||
|
socket2::Domain::IPV4
|
||||||
|
} else {
|
||||||
|
socket2::Domain::IPV6
|
||||||
|
};
|
||||||
|
|
||||||
|
::log::info!("creating socket..");
|
||||||
|
|
||||||
|
let socket = socket2::Socket::new(domain, socket2::Type::STREAM, Some(socket2::Protocol::TCP))
|
||||||
|
.with_context(|| "create socket")?;
|
||||||
|
|
||||||
|
if config.network.only_ipv6 {
|
||||||
|
::log::info!("setting socket to ipv6 only..");
|
||||||
|
|
||||||
|
socket
|
||||||
|
.set_only_v6(true)
|
||||||
|
.with_context(|| "socket: set only ipv6")?;
|
||||||
|
}
|
||||||
|
|
||||||
|
::log::info!("setting SO_REUSEPORT on socket..");
|
||||||
|
|
||||||
|
socket
|
||||||
|
.set_reuse_port(true)
|
||||||
|
.with_context(|| "socket: set reuse port")?;
|
||||||
|
|
||||||
|
::log::info!("binding socket..");
|
||||||
|
|
||||||
|
socket
|
||||||
|
.bind(&config.network.address.into())
|
||||||
|
.with_context(|| format!("socket: bind to {}", config.network.address))?;
|
||||||
|
|
||||||
|
::log::info!("listening on socket..");
|
||||||
|
|
||||||
|
socket
|
||||||
|
.listen(config.network.tcp_backlog)
|
||||||
|
.with_context(|| format!("socket: listen {}", config.network.address))?;
|
||||||
|
|
||||||
|
::log::info!("running PrivilegeDropper::after_socket_creation..");
|
||||||
|
|
||||||
|
priv_dropper.after_socket_creation()?;
|
||||||
|
|
||||||
|
::log::info!("casting socket to glommio TcpListener..");
|
||||||
|
|
||||||
|
Ok(unsafe { TcpListener::from_raw_fd(socket.into_raw_fd()) })
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
fn ip_version_to_metrics_str(ip_version: IpVersion) -> &'static str {
|
||||||
|
match ip_version {
|
||||||
|
IpVersion::V4 => "4",
|
||||||
|
IpVersion::V6 => "6",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn calculate_in_message_consumer_index(config: &Config, info_hash: InfoHash) -> usize {
|
||||||
|
(info_hash.0[0] as usize) % config.swarm_workers
|
||||||
|
}
|
||||||
|
|
@ -1,541 +0,0 @@
|
||||||
use std::cell::RefCell;
|
|
||||||
use std::rc::Rc;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
|
|
||||||
use futures::StreamExt;
|
|
||||||
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders};
|
|
||||||
use glommio::enclose;
|
|
||||||
use glommio::prelude::*;
|
|
||||||
use glommio::timer::TimerActionRepeat;
|
|
||||||
use hashbrown::HashMap;
|
|
||||||
use rand::{rngs::SmallRng, SeedableRng};
|
|
||||||
|
|
||||||
use aquatic_common::{
|
|
||||||
extract_response_peers, IndexMap, PanicSentinel, SecondsSinceServerStart, ServerStartInstant,
|
|
||||||
};
|
|
||||||
use aquatic_ws_protocol::*;
|
|
||||||
|
|
||||||
use crate::common::*;
|
|
||||||
use crate::config::Config;
|
|
||||||
use crate::SHARED_IN_CHANNEL_SIZE;
|
|
||||||
|
|
||||||
#[cfg(feature = "metrics")]
|
|
||||||
thread_local! { static WORKER_INDEX: ::std::cell::Cell<usize> = Default::default() }
|
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
|
|
||||||
enum PeerStatus {
|
|
||||||
Seeding,
|
|
||||||
Leeching,
|
|
||||||
Stopped,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PeerStatus {
|
|
||||||
/// Determine peer status from announce event and number of bytes left.
|
|
||||||
///
|
|
||||||
/// Likely, the last branch will be taken most of the time.
|
|
||||||
#[inline]
|
|
||||||
fn from_event_and_bytes_left(event: AnnounceEvent, opt_bytes_left: Option<usize>) -> Self {
|
|
||||||
if let AnnounceEvent::Stopped = event {
|
|
||||||
Self::Stopped
|
|
||||||
} else if let Some(0) = opt_bytes_left {
|
|
||||||
Self::Seeding
|
|
||||||
} else {
|
|
||||||
Self::Leeching
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Copy)]
|
|
||||||
struct Peer {
|
|
||||||
pub consumer_id: ConsumerId,
|
|
||||||
pub connection_id: ConnectionId,
|
|
||||||
pub seeder: bool,
|
|
||||||
pub valid_until: ValidUntil,
|
|
||||||
}
|
|
||||||
|
|
||||||
type PeerMap = IndexMap<PeerId, Peer>;
|
|
||||||
|
|
||||||
struct TorrentData {
|
|
||||||
pub peers: PeerMap,
|
|
||||||
pub num_seeders: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for TorrentData {
|
|
||||||
#[inline]
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
peers: Default::default(),
|
|
||||||
num_seeders: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TorrentData {
|
|
||||||
pub fn remove_peer(&mut self, peer_id: PeerId) {
|
|
||||||
if let Some(peer) = self.peers.remove(&peer_id) {
|
|
||||||
if peer.seeder {
|
|
||||||
self.num_seeders -= 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn num_leechers(&self) -> usize {
|
|
||||||
self.peers.len() - self.num_seeders
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type TorrentMap = IndexMap<InfoHash, TorrentData>;
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
struct TorrentMaps {
|
|
||||||
pub ipv4: TorrentMap,
|
|
||||||
pub ipv6: TorrentMap,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TorrentMaps {
|
|
||||||
fn clean(
|
|
||||||
&mut self,
|
|
||||||
config: &Config,
|
|
||||||
access_list: &Arc<AccessListArcSwap>,
|
|
||||||
server_start_instant: ServerStartInstant,
|
|
||||||
) {
|
|
||||||
let mut access_list_cache = create_access_list_cache(access_list);
|
|
||||||
let now = server_start_instant.seconds_elapsed();
|
|
||||||
|
|
||||||
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv4, now, "4");
|
|
||||||
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv6, now, "6");
|
|
||||||
}
|
|
||||||
|
|
||||||
fn clean_torrent_map(
|
|
||||||
config: &Config,
|
|
||||||
access_list_cache: &mut AccessListCache,
|
|
||||||
torrent_map: &mut TorrentMap,
|
|
||||||
now: SecondsSinceServerStart,
|
|
||||||
ip_version: &'static str,
|
|
||||||
) {
|
|
||||||
let mut total_num_peers = 0u64;
|
|
||||||
|
|
||||||
torrent_map.retain(|info_hash, torrent_data| {
|
|
||||||
if !access_list_cache
|
|
||||||
.load()
|
|
||||||
.allows(config.access_list.mode, &info_hash.0)
|
|
||||||
{
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
let num_seeders = &mut torrent_data.num_seeders;
|
|
||||||
|
|
||||||
torrent_data.peers.retain(|_, peer| {
|
|
||||||
let keep = peer.valid_until.valid(now);
|
|
||||||
|
|
||||||
if (!keep) & peer.seeder {
|
|
||||||
*num_seeders -= 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
keep
|
|
||||||
});
|
|
||||||
|
|
||||||
total_num_peers += torrent_data.peers.len() as u64;
|
|
||||||
|
|
||||||
!torrent_data.peers.is_empty()
|
|
||||||
});
|
|
||||||
|
|
||||||
torrent_map.shrink_to_fit();
|
|
||||||
|
|
||||||
let total_num_peers = total_num_peers as f64;
|
|
||||||
|
|
||||||
#[cfg(feature = "metrics")]
|
|
||||||
::metrics::gauge!(
|
|
||||||
"aquatic_peers",
|
|
||||||
total_num_peers,
|
|
||||||
"ip_version" => ip_version,
|
|
||||||
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn run_swarm_worker(
|
|
||||||
_sentinel: PanicSentinel,
|
|
||||||
config: Config,
|
|
||||||
state: State,
|
|
||||||
control_message_mesh_builder: MeshBuilder<SwarmControlMessage, Partial>,
|
|
||||||
in_message_mesh_builder: MeshBuilder<(InMessageMeta, InMessage), Partial>,
|
|
||||||
out_message_mesh_builder: MeshBuilder<(OutMessageMeta, OutMessage), Partial>,
|
|
||||||
server_start_instant: ServerStartInstant,
|
|
||||||
worker_index: usize,
|
|
||||||
) {
|
|
||||||
#[cfg(feature = "metrics")]
|
|
||||||
WORKER_INDEX.with(|index| index.set(worker_index));
|
|
||||||
|
|
||||||
let (_, mut control_message_receivers) = control_message_mesh_builder
|
|
||||||
.join(Role::Consumer)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let (_, mut in_message_receivers) = in_message_mesh_builder.join(Role::Consumer).await.unwrap();
|
|
||||||
let (out_message_senders, _) = out_message_mesh_builder.join(Role::Producer).await.unwrap();
|
|
||||||
|
|
||||||
let out_message_senders = Rc::new(out_message_senders);
|
|
||||||
|
|
||||||
let torrents = Rc::new(RefCell::new(TorrentMaps::default()));
|
|
||||||
let access_list = state.access_list;
|
|
||||||
|
|
||||||
// Periodically clean torrents
|
|
||||||
TimerActionRepeat::repeat(enclose!((config, torrents, access_list) move || {
|
|
||||||
enclose!((config, torrents, access_list) move || async move {
|
|
||||||
torrents.borrow_mut().clean(&config, &access_list, server_start_instant);
|
|
||||||
|
|
||||||
Some(Duration::from_secs(config.cleaning.torrent_cleaning_interval))
|
|
||||||
})()
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Periodically update torrent count metrics
|
|
||||||
#[cfg(feature = "metrics")]
|
|
||||||
TimerActionRepeat::repeat(enclose!((config, torrents) move || {
|
|
||||||
enclose!((config, torrents) move || async move {
|
|
||||||
let torrents = torrents.borrow_mut();
|
|
||||||
|
|
||||||
::metrics::gauge!(
|
|
||||||
"aquatic_torrents",
|
|
||||||
torrents.ipv4.len() as f64,
|
|
||||||
"ip_version" => "4",
|
|
||||||
"worker_index" => worker_index.to_string(),
|
|
||||||
);
|
|
||||||
::metrics::gauge!(
|
|
||||||
"aquatic_torrents",
|
|
||||||
torrents.ipv6.len() as f64,
|
|
||||||
"ip_version" => "6",
|
|
||||||
"worker_index" => worker_index.to_string(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Some(Duration::from_secs(config.metrics.torrent_count_update_interval))
|
|
||||||
})()
|
|
||||||
}));
|
|
||||||
|
|
||||||
let mut handles = Vec::new();
|
|
||||||
|
|
||||||
for (_, receiver) in control_message_receivers.streams() {
|
|
||||||
let handle =
|
|
||||||
spawn_local(handle_control_message_stream(torrents.clone(), receiver)).detach();
|
|
||||||
|
|
||||||
handles.push(handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (_, receiver) in in_message_receivers.streams() {
|
|
||||||
let handle = spawn_local(handle_request_stream(
|
|
||||||
config.clone(),
|
|
||||||
torrents.clone(),
|
|
||||||
server_start_instant,
|
|
||||||
out_message_senders.clone(),
|
|
||||||
receiver,
|
|
||||||
))
|
|
||||||
.detach();
|
|
||||||
|
|
||||||
handles.push(handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
for handle in handles {
|
|
||||||
handle.await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_control_message_stream<S>(torrents: Rc<RefCell<TorrentMaps>>, mut stream: S)
|
|
||||||
where
|
|
||||||
S: futures_lite::Stream<Item = SwarmControlMessage> + ::std::marker::Unpin,
|
|
||||||
{
|
|
||||||
while let Some(message) = stream.next().await {
|
|
||||||
match message {
|
|
||||||
SwarmControlMessage::ConnectionClosed {
|
|
||||||
info_hash,
|
|
||||||
peer_id,
|
|
||||||
ip_version,
|
|
||||||
} => {
|
|
||||||
::log::debug!("Removing peer from torrents because connection was closed");
|
|
||||||
|
|
||||||
if let IpVersion::V4 = ip_version {
|
|
||||||
if let Some(torrent_data) = torrents.borrow_mut().ipv4.get_mut(&info_hash) {
|
|
||||||
torrent_data.remove_peer(peer_id);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if let Some(torrent_data) = torrents.borrow_mut().ipv6.get_mut(&info_hash) {
|
|
||||||
torrent_data.remove_peer(peer_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_request_stream<S>(
|
|
||||||
config: Config,
|
|
||||||
torrents: Rc<RefCell<TorrentMaps>>,
|
|
||||||
server_start_instant: ServerStartInstant,
|
|
||||||
out_message_senders: Rc<Senders<(OutMessageMeta, OutMessage)>>,
|
|
||||||
stream: S,
|
|
||||||
) where
|
|
||||||
S: futures_lite::Stream<Item = (InMessageMeta, InMessage)> + ::std::marker::Unpin,
|
|
||||||
{
|
|
||||||
let rng = Rc::new(RefCell::new(SmallRng::from_entropy()));
|
|
||||||
|
|
||||||
let max_peer_age = config.cleaning.max_peer_age;
|
|
||||||
let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(
|
|
||||||
server_start_instant,
|
|
||||||
max_peer_age,
|
|
||||||
)));
|
|
||||||
|
|
||||||
TimerActionRepeat::repeat(enclose!((peer_valid_until) move || {
|
|
||||||
enclose!((peer_valid_until) move || async move {
|
|
||||||
*peer_valid_until.borrow_mut() = ValidUntil::new(server_start_instant, max_peer_age);
|
|
||||||
|
|
||||||
Some(Duration::from_secs(1))
|
|
||||||
})()
|
|
||||||
}));
|
|
||||||
|
|
||||||
let config = &config;
|
|
||||||
let torrents = &torrents;
|
|
||||||
let peer_valid_until = &peer_valid_until;
|
|
||||||
let rng = &rng;
|
|
||||||
let out_message_senders = &out_message_senders;
|
|
||||||
|
|
||||||
stream
|
|
||||||
.for_each_concurrent(
|
|
||||||
SHARED_IN_CHANNEL_SIZE,
|
|
||||||
move |(meta, in_message)| async move {
|
|
||||||
let mut out_messages = Vec::new();
|
|
||||||
|
|
||||||
match in_message {
|
|
||||||
InMessage::AnnounceRequest(request) => handle_announce_request(
|
|
||||||
&config,
|
|
||||||
&mut rng.borrow_mut(),
|
|
||||||
&mut torrents.borrow_mut(),
|
|
||||||
&mut out_messages,
|
|
||||||
peer_valid_until.borrow().to_owned(),
|
|
||||||
meta,
|
|
||||||
request,
|
|
||||||
),
|
|
||||||
InMessage::ScrapeRequest(request) => handle_scrape_request(
|
|
||||||
&config,
|
|
||||||
&mut torrents.borrow_mut(),
|
|
||||||
&mut out_messages,
|
|
||||||
meta,
|
|
||||||
request,
|
|
||||||
),
|
|
||||||
};
|
|
||||||
|
|
||||||
for (meta, out_message) in out_messages.drain(..) {
|
|
||||||
out_message_senders
|
|
||||||
.send_to(meta.out_message_consumer_id.0 as usize, (meta, out_message))
|
|
||||||
.await
|
|
||||||
.expect("failed sending out_message to socket worker");
|
|
||||||
|
|
||||||
::log::debug!("swarm worker sent OutMessage to socket worker");
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Handle an announce request from a peer (pre-refactor version).
///
/// Registers/updates/removes the sending peer in the relevant torrent,
/// relays any WebRTC offers to randomly chosen peers, relays an answer to
/// the addressed peer, and finally queues an announce response back to the
/// sender.
///
/// NOTE(review): offers/answers are relayed and an AnnounceResponse is
/// queued even when the peer announced with event "stopped" (and was just
/// removed) — presumably unintended; the refactored version returns early
/// in that case.
fn handle_announce_request(
    config: &Config,
    rng: &mut SmallRng,
    torrent_maps: &mut TorrentMaps,
    out_messages: &mut Vec<(OutMessageMeta, OutMessage)>,
    valid_until: ValidUntil,
    request_sender_meta: InMessageMeta,
    request: AnnounceRequest,
) {
    // Select the IPv4 or IPv6 torrent map (peers are never mixed across IP
    // versions), creating the torrent entry if needed. The string is used
    // as a metrics label.
    let (torrent_data, ip_version): (&mut TorrentData, &'static str) =
        if let IpVersion::V4 = request_sender_meta.ip_version {
            (torrent_maps.ipv4.entry(request.info_hash).or_default(), "4")
        } else {
            (torrent_maps.ipv6.entry(request.info_hash).or_default(), "6")
        };

    // If there is already a peer with this peer_id, check that connection id
    // is same as that of request sender. Otherwise, ignore request. Since
    // peers have access to each others peer_id's, they could send requests
    // using them, causing all sorts of issues.
    if let Some(previous_peer) = torrent_data.peers.get(&request.peer_id) {
        if request_sender_meta.connection_id != previous_peer.connection_id {
            return;
        }
    }

    ::log::trace!("received request from {:?}", request_sender_meta);

    // Insert/update/remove peer who sent this request
    {
        let peer_status = PeerStatus::from_event_and_bytes_left(
            request.event.unwrap_or_default(),
            request.bytes_left,
        );

        // `insert` returns the previously stored peer (if any), `remove`
        // the removed one; either way the displaced peer is inspected below
        // to keep `num_seeders` consistent.
        let opt_removed_peer = match peer_status {
            PeerStatus::Leeching => {
                let peer = Peer {
                    connection_id: request_sender_meta.connection_id,
                    consumer_id: request_sender_meta.out_message_consumer_id,
                    seeder: false,
                    valid_until,
                };

                torrent_data.peers.insert(request.peer_id, peer)
            }
            PeerStatus::Seeding => {
                torrent_data.num_seeders += 1;

                let peer = Peer {
                    connection_id: request_sender_meta.connection_id,
                    consumer_id: request_sender_meta.out_message_consumer_id,
                    seeder: true,
                    valid_until,
                };

                torrent_data.peers.insert(request.peer_id, peer)
            }
            PeerStatus::Stopped => torrent_data.peers.remove(&request.peer_id),
        };

        // If a seeding peer was displaced/removed, undo its seeder count
        if let Some(&Peer { seeder: true, .. }) = opt_removed_peer.as_ref() {
            torrent_data.num_seeders -= 1;
        }

        // Gauge only changes when the peer count actually changed: removal
        // of an existing peer, or insertion of a genuinely new one.
        #[cfg(feature = "metrics")]
        match peer_status {
            PeerStatus::Stopped if opt_removed_peer.is_some() => {
                ::metrics::decrement_gauge!(
                    "aquatic_peers",
                    1.0,
                    "ip_version" => ip_version,
                    "worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
                );
            }
            PeerStatus::Leeching | PeerStatus::Seeding if opt_removed_peer.is_none() => {
                ::metrics::increment_gauge!(
                    "aquatic_peers",
                    1.0,
                    "ip_version" => ip_version,
                    "worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
                );
            }
            _ => {}
        }
    }

    // If peer sent offers, send them on to random peers
    if let Some(offers) = request.offers {
        // FIXME: config: also maybe check this when parsing request
        let max_num_peers_to_take = offers.len().min(config.protocol.max_offers);

        #[inline]
        fn f(peer: &Peer) -> Peer {
            *peer
        }

        // Randomly chosen peers, excluding the request sender
        let offer_receivers: Vec<Peer> = extract_response_peers(
            rng,
            &torrent_data.peers,
            max_num_peers_to_take,
            request.peer_id,
            f,
        );

        for (offer, offer_receiver) in offers.into_iter().zip(offer_receivers) {
            let offer_out_message = OfferOutMessage {
                action: AnnounceAction::Announce,
                info_hash: request.info_hash,
                peer_id: request.peer_id,
                offer: offer.offer,
                offer_id: offer.offer_id,
            };

            let meta = OutMessageMeta {
                out_message_consumer_id: offer_receiver.consumer_id,
                connection_id: offer_receiver.connection_id,
                pending_scrape_id: None,
            };

            out_messages.push((meta, OutMessage::OfferOutMessage(offer_out_message)));
            ::log::trace!("sending middleman offer to {:?}", meta);
        }
    }

    // If peer sent answer, send it on to relevant peer
    if let (Some(answer), Some(answer_receiver_id), Some(offer_id)) = (
        request.answer,
        request.answer_to_peer_id,
        request.answer_offer_id,
    ) {
        if let Some(answer_receiver) = torrent_data.peers.get(&answer_receiver_id) {
            let answer_out_message = AnswerOutMessage {
                action: AnnounceAction::Announce,
                peer_id: request.peer_id,
                info_hash: request.info_hash,
                answer,
                offer_id,
            };

            let meta = OutMessageMeta {
                out_message_consumer_id: answer_receiver.consumer_id,
                connection_id: answer_receiver.connection_id,
                pending_scrape_id: None,
            };

            out_messages.push((meta, OutMessage::AnswerOutMessage(answer_out_message)));
            ::log::trace!("sending middleman answer to {:?}", meta);
        }
    }

    // Queue the announce response for the request sender
    let out_message = OutMessage::AnnounceResponse(AnnounceResponse {
        action: AnnounceAction::Announce,
        info_hash: request.info_hash,
        complete: torrent_data.num_seeders,
        incomplete: torrent_data.num_leechers(),
        announce_interval: config.protocol.peer_announce_interval,
    });

    out_messages.push((request_sender_meta.into(), out_message));
}
|
|
||||||
|
|
||||||
fn handle_scrape_request(
|
|
||||||
config: &Config,
|
|
||||||
torrent_maps: &mut TorrentMaps,
|
|
||||||
out_messages: &mut Vec<(OutMessageMeta, OutMessage)>,
|
|
||||||
meta: InMessageMeta,
|
|
||||||
request: ScrapeRequest,
|
|
||||||
) {
|
|
||||||
let info_hashes = if let Some(info_hashes) = request.info_hashes {
|
|
||||||
info_hashes.as_vec()
|
|
||||||
} else {
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
let num_to_take = info_hashes.len().min(config.protocol.max_scrape_torrents);
|
|
||||||
|
|
||||||
let mut out_message = ScrapeResponse {
|
|
||||||
action: ScrapeAction::Scrape,
|
|
||||||
files: HashMap::with_capacity(num_to_take),
|
|
||||||
};
|
|
||||||
|
|
||||||
let torrent_map: &mut TorrentMap = if let IpVersion::V4 = meta.ip_version {
|
|
||||||
&mut torrent_maps.ipv4
|
|
||||||
} else {
|
|
||||||
&mut torrent_maps.ipv6
|
|
||||||
};
|
|
||||||
|
|
||||||
for info_hash in info_hashes.into_iter().take(num_to_take) {
|
|
||||||
if let Some(torrent_data) = torrent_map.get(&info_hash) {
|
|
||||||
let stats = ScrapeStatistics {
|
|
||||||
complete: torrent_data.num_seeders,
|
|
||||||
downloaded: 0, // No implementation planned
|
|
||||||
incomplete: torrent_data.num_leechers(),
|
|
||||||
};
|
|
||||||
|
|
||||||
out_message.files.insert(info_hash, stats);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
out_messages.push((meta.into(), OutMessage::ScrapeResponse(out_message)));
|
|
||||||
}
|
|
||||||
183
crates/ws/src/workers/swarm/mod.rs
Normal file
183
crates/ws/src/workers/swarm/mod.rs
Normal file
|
|
@ -0,0 +1,183 @@
|
||||||
|
mod storage;
|
||||||
|
|
||||||
|
use std::cell::RefCell;
|
||||||
|
use std::rc::Rc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use futures::StreamExt;
|
||||||
|
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders};
|
||||||
|
use glommio::enclose;
|
||||||
|
use glommio::prelude::*;
|
||||||
|
use glommio::timer::TimerActionRepeat;
|
||||||
|
use rand::{rngs::SmallRng, SeedableRng};
|
||||||
|
|
||||||
|
use aquatic_common::{PanicSentinel, ServerStartInstant};
|
||||||
|
use aquatic_ws_protocol::*;
|
||||||
|
|
||||||
|
use crate::common::*;
|
||||||
|
use crate::config::Config;
|
||||||
|
use crate::SHARED_IN_CHANNEL_SIZE;
|
||||||
|
|
||||||
|
use self::storage::TorrentMaps;
|
||||||
|
|
||||||
|
// Index of this swarm worker, set once at startup and used to label
// metrics. Thread-local because each worker runs on its own thread.
#[cfg(feature = "metrics")]
thread_local! { static WORKER_INDEX: ::std::cell::Cell<usize> = Default::default() }
|
||||||
|
|
||||||
|
/// Run a swarm worker.
///
/// Joins the channel meshes connecting it to the socket workers, spawns
/// periodic cleaning (and, with the `metrics` feature, metrics-update)
/// timers, then spawns one task per incoming stream: control messages
/// (connection closed notifications) and in messages (announce/scrape
/// requests). Out messages are sent back to socket workers through the
/// producer side of the out message mesh.
pub async fn run_swarm_worker(
    _sentinel: PanicSentinel,
    config: Config,
    state: State,
    control_message_mesh_builder: MeshBuilder<SwarmControlMessage, Partial>,
    in_message_mesh_builder: MeshBuilder<(InMessageMeta, InMessage), Partial>,
    out_message_mesh_builder: MeshBuilder<(OutMessageMeta, OutMessage), Partial>,
    server_start_instant: ServerStartInstant,
    worker_index: usize,
) {
    #[cfg(feature = "metrics")]
    WORKER_INDEX.with(|index| index.set(worker_index));

    // This worker consumes control and in messages, produces out messages
    let (_, mut control_message_receivers) = control_message_mesh_builder
        .join(Role::Consumer)
        .await
        .unwrap();

    let (_, mut in_message_receivers) = in_message_mesh_builder.join(Role::Consumer).await.unwrap();
    let (out_message_senders, _) = out_message_mesh_builder.join(Role::Producer).await.unwrap();

    let out_message_senders = Rc::new(out_message_senders);

    let torrents = Rc::new(RefCell::new(TorrentMaps::default()));
    let access_list = state.access_list;

    // Periodically clean torrents
    TimerActionRepeat::repeat(enclose!((config, torrents, access_list) move || {
        enclose!((config, torrents, access_list) move || async move {
            torrents.borrow_mut().clean(&config, &access_list, server_start_instant);

            Some(Duration::from_secs(config.cleaning.torrent_cleaning_interval))
        })()
    }));

    // Periodically update torrent count metrics
    #[cfg(feature = "metrics")]
    TimerActionRepeat::repeat(enclose!((config, torrents) move || {
        enclose!((config, torrents) move || async move {
            torrents.borrow_mut().update_torrent_count_metrics();

            Some(Duration::from_secs(config.metrics.torrent_count_update_interval))
        })()
    }));

    let mut handles = Vec::new();

    // One task per socket worker sending us control messages
    for (_, receiver) in control_message_receivers.streams() {
        let handle =
            spawn_local(handle_control_message_stream(torrents.clone(), receiver)).detach();

        handles.push(handle);
    }

    // One task per socket worker sending us requests
    for (_, receiver) in in_message_receivers.streams() {
        let handle = spawn_local(handle_request_stream(
            config.clone(),
            torrents.clone(),
            server_start_instant,
            out_message_senders.clone(),
            receiver,
        ))
        .detach();

        handles.push(handle);
    }

    // Run until all stream-handling tasks finish (i.e. senders hang up)
    for handle in handles {
        handle.await;
    }
}
|
||||||
|
|
||||||
|
async fn handle_control_message_stream<S>(torrents: Rc<RefCell<TorrentMaps>>, mut stream: S)
|
||||||
|
where
|
||||||
|
S: futures_lite::Stream<Item = SwarmControlMessage> + ::std::marker::Unpin,
|
||||||
|
{
|
||||||
|
while let Some(message) = stream.next().await {
|
||||||
|
match message {
|
||||||
|
SwarmControlMessage::ConnectionClosed {
|
||||||
|
info_hash,
|
||||||
|
peer_id,
|
||||||
|
ip_version,
|
||||||
|
} => {
|
||||||
|
torrents
|
||||||
|
.borrow_mut()
|
||||||
|
.handle_connection_closed(info_hash, peer_id, ip_version);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Process announce and scrape requests from one socket worker.
///
/// Requests are handled with bounded concurrency; generated out messages
/// are sent to the socket worker that owns the addressed connection.
async fn handle_request_stream<S>(
    config: Config,
    torrents: Rc<RefCell<TorrentMaps>>,
    server_start_instant: ServerStartInstant,
    out_message_senders: Rc<Senders<(OutMessageMeta, OutMessage)>>,
    stream: S,
) where
    S: futures_lite::Stream<Item = (InMessageMeta, InMessage)> + ::std::marker::Unpin,
{
    let rng = Rc::new(RefCell::new(SmallRng::from_entropy()));

    // Peer expiry timestamp handed to announce handling. Computed once per
    // second by a timer instead of on every request.
    let max_peer_age = config.cleaning.max_peer_age;
    let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(
        server_start_instant,
        max_peer_age,
    )));

    // Periodically update peer_valid_until
    TimerActionRepeat::repeat(enclose!((peer_valid_until) move || {
        enclose!((peer_valid_until) move || async move {
            *peer_valid_until.borrow_mut() = ValidUntil::new(server_start_instant, max_peer_age);

            Some(Duration::from_secs(1))
        })()
    }));

    // Reborrow so the `move` closure below captures references, not the
    // owned values themselves
    let config = &config;
    let torrents = &torrents;
    let peer_valid_until = &peer_valid_until;
    let rng = &rng;
    let out_message_senders = &out_message_senders;

    stream
        .for_each_concurrent(
            SHARED_IN_CHANNEL_SIZE,
            move |(meta, in_message)| async move {
                let mut out_messages = Vec::new();

                match in_message {
                    InMessage::AnnounceRequest(request) => {
                        torrents.borrow_mut().handle_announce_request(
                            &config,
                            &mut rng.borrow_mut(),
                            &mut out_messages,
                            server_start_instant,
                            peer_valid_until.borrow().to_owned(),
                            meta,
                            request,
                        )
                    }
                    InMessage::ScrapeRequest(request) => torrents
                        .borrow_mut()
                        .handle_scrape_request(&config, &mut out_messages, meta, request),
                };

                // Route each generated message to the socket worker that
                // owns the target connection
                for (meta, out_message) in out_messages {
                    out_message_senders
                        .send_to(meta.out_message_consumer_id.0 as usize, (meta, out_message))
                        .await
                        .expect("failed sending out_message to socket worker");

                    ::log::debug!("swarm worker sent OutMessage to socket worker");
                }
            },
        )
        .await;
}
|
||||||
488
crates/ws/src/workers/swarm/storage.rs
Normal file
488
crates/ws/src/workers/swarm/storage.rs
Normal file
|
|
@ -0,0 +1,488 @@
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
|
||||||
|
use hashbrown::HashMap;
|
||||||
|
use rand::rngs::SmallRng;
|
||||||
|
|
||||||
|
use aquatic_common::{
|
||||||
|
extract_response_peers, IndexMap, SecondsSinceServerStart, ServerStartInstant,
|
||||||
|
};
|
||||||
|
use aquatic_ws_protocol::*;
|
||||||
|
|
||||||
|
use crate::common::*;
|
||||||
|
use crate::config::Config;
|
||||||
|
use crate::workers::swarm::WORKER_INDEX;
|
||||||
|
|
||||||
|
// All torrents known to this worker, keyed by info hash. IndexMap
// presumably chosen for indexed access during random peer selection —
// confirm against extract_response_peers.
type TorrentMap = IndexMap<InfoHash, TorrentData>;
// Peers of a single torrent, keyed by peer id.
type PeerMap = IndexMap<PeerId, Peer>;
|
||||||
|
|
||||||
|
/// Torrent/peer state of a swarm worker, kept separately per IP version so
/// announce and scrape requests only ever see peers of the requester's own
/// IP version.
#[derive(Default)]
pub struct TorrentMaps {
    ipv4: TorrentMap,
    ipv6: TorrentMap,
}
|
||||||
|
|
||||||
|
impl TorrentMaps {
|
||||||
|
pub fn handle_announce_request(
|
||||||
|
&mut self,
|
||||||
|
config: &Config,
|
||||||
|
rng: &mut SmallRng,
|
||||||
|
out_messages: &mut Vec<(OutMessageMeta, OutMessage)>,
|
||||||
|
server_start_instant: ServerStartInstant,
|
||||||
|
valid_until: ValidUntil,
|
||||||
|
request_sender_meta: InMessageMeta,
|
||||||
|
request: AnnounceRequest,
|
||||||
|
) {
|
||||||
|
let (torrent_data, ip_version): (&mut TorrentData, &'static str) =
|
||||||
|
if let IpVersion::V4 = request_sender_meta.ip_version {
|
||||||
|
(self.ipv4.entry(request.info_hash).or_default(), "4")
|
||||||
|
} else {
|
||||||
|
(self.ipv6.entry(request.info_hash).or_default(), "6")
|
||||||
|
};
|
||||||
|
|
||||||
|
// If there is already a peer with this peer_id, check that connection id
|
||||||
|
// is same as that of request sender. Otherwise, ignore request. Since
|
||||||
|
// peers have access to each others peer_id's, they could send requests
|
||||||
|
// using them, causing all sorts of issues.
|
||||||
|
if let Some(previous_peer) = torrent_data.peers.get(&request.peer_id) {
|
||||||
|
if request_sender_meta.connection_id != previous_peer.connection_id {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
::log::trace!("received request from {:?}", request_sender_meta);
|
||||||
|
|
||||||
|
// Insert/update/remove peer who sent this request
|
||||||
|
{
|
||||||
|
let peer_status = PeerStatus::from_event_and_bytes_left(
|
||||||
|
request.event.unwrap_or_default(),
|
||||||
|
request.bytes_left,
|
||||||
|
);
|
||||||
|
|
||||||
|
match torrent_data.peers.entry(request.peer_id) {
|
||||||
|
::indexmap::map::Entry::Occupied(mut entry) => match peer_status {
|
||||||
|
PeerStatus::Leeching => {
|
||||||
|
let peer = entry.get_mut();
|
||||||
|
|
||||||
|
if peer.seeder {
|
||||||
|
torrent_data.num_seeders -= 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
peer.seeder = false;
|
||||||
|
peer.valid_until = valid_until;
|
||||||
|
}
|
||||||
|
PeerStatus::Seeding => {
|
||||||
|
let peer = entry.get_mut();
|
||||||
|
|
||||||
|
if !peer.seeder {
|
||||||
|
torrent_data.num_seeders += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
peer.seeder = true;
|
||||||
|
peer.valid_until = valid_until;
|
||||||
|
}
|
||||||
|
PeerStatus::Stopped => {
|
||||||
|
let peer = entry.remove();
|
||||||
|
|
||||||
|
if peer.seeder {
|
||||||
|
torrent_data.num_seeders -= 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::decrement_gauge!(
|
||||||
|
"aquatic_peers",
|
||||||
|
1.0,
|
||||||
|
"ip_version" => ip_version,
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
::indexmap::map::Entry::Vacant(entry) => match peer_status {
|
||||||
|
PeerStatus::Leeching => {
|
||||||
|
let peer = Peer {
|
||||||
|
connection_id: request_sender_meta.connection_id,
|
||||||
|
consumer_id: request_sender_meta.out_message_consumer_id,
|
||||||
|
seeder: false,
|
||||||
|
valid_until,
|
||||||
|
expecting_answers: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
entry.insert(peer);
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_peers",
|
||||||
|
1.0,
|
||||||
|
"ip_version" => ip_version,
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
PeerStatus::Seeding => {
|
||||||
|
torrent_data.num_seeders += 1;
|
||||||
|
|
||||||
|
let peer = Peer {
|
||||||
|
connection_id: request_sender_meta.connection_id,
|
||||||
|
consumer_id: request_sender_meta.out_message_consumer_id,
|
||||||
|
seeder: true,
|
||||||
|
valid_until,
|
||||||
|
expecting_answers: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
entry.insert(peer);
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::increment_gauge!(
|
||||||
|
"aquatic_peers",
|
||||||
|
1.0,
|
||||||
|
"ip_version" => ip_version,
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
PeerStatus::Stopped => return,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// If peer sent offers, send them on to random peers
|
||||||
|
if let Some(offers) = request.offers {
|
||||||
|
// FIXME: config: also maybe check this when parsing request
|
||||||
|
let max_num_peers_to_take = offers.len().min(config.protocol.max_offers);
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn convert_offer_receiver_peer(
|
||||||
|
peer_id: &PeerId,
|
||||||
|
peer: &Peer,
|
||||||
|
) -> (PeerId, ConnectionId, ConsumerId) {
|
||||||
|
(*peer_id, peer.connection_id, peer.consumer_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
let offer_receivers: Vec<(PeerId, ConnectionId, ConsumerId)> = extract_response_peers(
|
||||||
|
rng,
|
||||||
|
&torrent_data.peers,
|
||||||
|
max_num_peers_to_take,
|
||||||
|
request.peer_id,
|
||||||
|
convert_offer_receiver_peer,
|
||||||
|
);
|
||||||
|
|
||||||
|
if let Some(peer) = torrent_data.peers.get_mut(&request.peer_id) {
|
||||||
|
for (
|
||||||
|
offer,
|
||||||
|
(
|
||||||
|
offer_receiver_peer_id,
|
||||||
|
offer_receiver_connection_id,
|
||||||
|
offer_receiver_consumer_id,
|
||||||
|
),
|
||||||
|
) in offers.into_iter().zip(offer_receivers)
|
||||||
|
{
|
||||||
|
let offer_out_message = OfferOutMessage {
|
||||||
|
action: AnnounceAction::Announce,
|
||||||
|
info_hash: request.info_hash,
|
||||||
|
peer_id: request.peer_id,
|
||||||
|
offer: offer.offer,
|
||||||
|
offer_id: offer.offer_id,
|
||||||
|
};
|
||||||
|
|
||||||
|
let meta = OutMessageMeta {
|
||||||
|
out_message_consumer_id: offer_receiver_consumer_id,
|
||||||
|
connection_id: offer_receiver_connection_id,
|
||||||
|
pending_scrape_id: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
out_messages.push((meta, OutMessage::OfferOutMessage(offer_out_message)));
|
||||||
|
::log::trace!("sending OfferOutMessage to {:?}", meta);
|
||||||
|
|
||||||
|
peer.expecting_answers.insert(
|
||||||
|
ExpectingAnswer {
|
||||||
|
from_peer_id: offer_receiver_peer_id,
|
||||||
|
regarding_offer_id: offer.offer_id,
|
||||||
|
},
|
||||||
|
ValidUntil::new(server_start_instant, config.cleaning.max_offer_age),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If peer sent answer, send it on to relevant peer
|
||||||
|
if let (Some(answer), Some(answer_receiver_id), Some(offer_id)) = (
|
||||||
|
request.answer,
|
||||||
|
request.answer_to_peer_id,
|
||||||
|
request.answer_offer_id,
|
||||||
|
) {
|
||||||
|
if let Some(answer_receiver) = torrent_data.peers.get_mut(&answer_receiver_id) {
|
||||||
|
let expecting_answer = ExpectingAnswer {
|
||||||
|
from_peer_id: request.peer_id,
|
||||||
|
regarding_offer_id: offer_id,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(_) = answer_receiver.expecting_answers.remove(&expecting_answer) {
|
||||||
|
let answer_out_message = AnswerOutMessage {
|
||||||
|
action: AnnounceAction::Announce,
|
||||||
|
peer_id: request.peer_id,
|
||||||
|
info_hash: request.info_hash,
|
||||||
|
answer,
|
||||||
|
offer_id,
|
||||||
|
};
|
||||||
|
|
||||||
|
let meta = OutMessageMeta {
|
||||||
|
out_message_consumer_id: answer_receiver.consumer_id,
|
||||||
|
connection_id: answer_receiver.connection_id,
|
||||||
|
pending_scrape_id: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
out_messages.push((meta, OutMessage::AnswerOutMessage(answer_out_message)));
|
||||||
|
::log::trace!("sending AnswerOutMessage to {:?}", meta);
|
||||||
|
} else {
|
||||||
|
let error_message = ErrorResponse {
|
||||||
|
action: Some(ErrorResponseAction::Announce),
|
||||||
|
info_hash: Some(request.info_hash),
|
||||||
|
failure_reason:
|
||||||
|
"Could not find the offer corresponding to your answer. It may have expired."
|
||||||
|
.into(),
|
||||||
|
};
|
||||||
|
|
||||||
|
out_messages.push((
|
||||||
|
request_sender_meta.into(),
|
||||||
|
OutMessage::ErrorResponse(error_message),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let out_message = OutMessage::AnnounceResponse(AnnounceResponse {
|
||||||
|
action: AnnounceAction::Announce,
|
||||||
|
info_hash: request.info_hash,
|
||||||
|
complete: torrent_data.num_seeders,
|
||||||
|
incomplete: torrent_data.num_leechers(),
|
||||||
|
announce_interval: config.protocol.peer_announce_interval,
|
||||||
|
});
|
||||||
|
|
||||||
|
out_messages.push((request_sender_meta.into(), out_message));
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn handle_scrape_request(
|
||||||
|
&mut self,
|
||||||
|
config: &Config,
|
||||||
|
out_messages: &mut Vec<(OutMessageMeta, OutMessage)>,
|
||||||
|
meta: InMessageMeta,
|
||||||
|
request: ScrapeRequest,
|
||||||
|
) {
|
||||||
|
let info_hashes = if let Some(info_hashes) = request.info_hashes {
|
||||||
|
info_hashes.as_vec()
|
||||||
|
} else {
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
let num_to_take = info_hashes.len().min(config.protocol.max_scrape_torrents);
|
||||||
|
|
||||||
|
let mut out_message = ScrapeResponse {
|
||||||
|
action: ScrapeAction::Scrape,
|
||||||
|
files: HashMap::with_capacity(num_to_take),
|
||||||
|
};
|
||||||
|
|
||||||
|
let torrent_map: &mut TorrentMap = if let IpVersion::V4 = meta.ip_version {
|
||||||
|
&mut self.ipv4
|
||||||
|
} else {
|
||||||
|
&mut self.ipv6
|
||||||
|
};
|
||||||
|
|
||||||
|
for info_hash in info_hashes.into_iter().take(num_to_take) {
|
||||||
|
if let Some(torrent_data) = torrent_map.get(&info_hash) {
|
||||||
|
let stats = ScrapeStatistics {
|
||||||
|
complete: torrent_data.num_seeders,
|
||||||
|
downloaded: 0, // No implementation planned
|
||||||
|
incomplete: torrent_data.num_leechers(),
|
||||||
|
};
|
||||||
|
|
||||||
|
out_message.files.insert(info_hash, stats);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out_messages.push((meta.into(), OutMessage::ScrapeResponse(out_message)));
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn clean(
|
||||||
|
&mut self,
|
||||||
|
config: &Config,
|
||||||
|
access_list: &Arc<AccessListArcSwap>,
|
||||||
|
server_start_instant: ServerStartInstant,
|
||||||
|
) {
|
||||||
|
let mut access_list_cache = create_access_list_cache(access_list);
|
||||||
|
let now = server_start_instant.seconds_elapsed();
|
||||||
|
|
||||||
|
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv4, now, "4");
|
||||||
|
Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv6, now, "6");
|
||||||
|
}
|
||||||
|
|
||||||
|
fn clean_torrent_map(
|
||||||
|
config: &Config,
|
||||||
|
access_list_cache: &mut AccessListCache,
|
||||||
|
torrent_map: &mut TorrentMap,
|
||||||
|
now: SecondsSinceServerStart,
|
||||||
|
ip_version: &'static str,
|
||||||
|
) {
|
||||||
|
let mut total_num_peers = 0u64;
|
||||||
|
|
||||||
|
torrent_map.retain(|info_hash, torrent_data| {
|
||||||
|
if !access_list_cache
|
||||||
|
.load()
|
||||||
|
.allows(config.access_list.mode, &info_hash.0)
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
let num_seeders = &mut torrent_data.num_seeders;
|
||||||
|
|
||||||
|
torrent_data.peers.retain(|_, peer| {
|
||||||
|
peer.expecting_answers
|
||||||
|
.retain(|_, valid_until| valid_until.valid(now));
|
||||||
|
peer.expecting_answers.shrink_to_fit();
|
||||||
|
|
||||||
|
let keep = peer.valid_until.valid(now);
|
||||||
|
|
||||||
|
if (!keep) & peer.seeder {
|
||||||
|
*num_seeders -= 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
keep
|
||||||
|
});
|
||||||
|
|
||||||
|
total_num_peers += torrent_data.peers.len() as u64;
|
||||||
|
|
||||||
|
torrent_data.peers.shrink_to_fit();
|
||||||
|
|
||||||
|
!torrent_data.peers.is_empty()
|
||||||
|
});
|
||||||
|
|
||||||
|
torrent_map.shrink_to_fit();
|
||||||
|
|
||||||
|
let total_num_peers = total_num_peers as f64;
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::gauge!(
|
||||||
|
"aquatic_peers",
|
||||||
|
total_num_peers,
|
||||||
|
"ip_version" => ip_version,
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
pub fn update_torrent_count_metrics(&self) {
|
||||||
|
::metrics::gauge!(
|
||||||
|
"aquatic_torrents",
|
||||||
|
self.ipv4.len() as f64,
|
||||||
|
"ip_version" => "4",
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
::metrics::gauge!(
|
||||||
|
"aquatic_torrents",
|
||||||
|
self.ipv6.len() as f64,
|
||||||
|
"ip_version" => "6",
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn handle_connection_closed(
|
||||||
|
&mut self,
|
||||||
|
info_hash: InfoHash,
|
||||||
|
peer_id: PeerId,
|
||||||
|
ip_version: IpVersion,
|
||||||
|
) {
|
||||||
|
::log::debug!("Removing peer from torrents because connection was closed");
|
||||||
|
|
||||||
|
if let IpVersion::V4 = ip_version {
|
||||||
|
if let Some(torrent_data) = self.ipv4.get_mut(&info_hash) {
|
||||||
|
torrent_data.remove_peer(peer_id);
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::decrement_gauge!(
|
||||||
|
"aquatic_peers",
|
||||||
|
1.0,
|
||||||
|
"ip_version" => "4",
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if let Some(torrent_data) = self.ipv6.get_mut(&info_hash) {
|
||||||
|
torrent_data.remove_peer(peer_id);
|
||||||
|
|
||||||
|
#[cfg(feature = "metrics")]
|
||||||
|
::metrics::decrement_gauge!(
|
||||||
|
"aquatic_peers",
|
||||||
|
1.0,
|
||||||
|
"ip_version" => "6",
|
||||||
|
"worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// State of a single torrent swarm.
struct TorrentData {
    peers: PeerMap,
    // Number of peers in `peers` with `seeder == true`; kept in sync on
    // every insert/update/removal so responses don't require a scan.
    num_seeders: usize,
}
|
||||||
|
|
||||||
|
impl Default for TorrentData {
|
||||||
|
#[inline]
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
peers: Default::default(),
|
||||||
|
num_seeders: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TorrentData {
|
||||||
|
fn remove_peer(&mut self, peer_id: PeerId) {
|
||||||
|
if let Some(peer) = self.peers.remove(&peer_id) {
|
||||||
|
if peer.seeder {
|
||||||
|
self.num_seeders -= 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn num_leechers(&self) -> usize {
|
||||||
|
self.peers.len() - self.num_seeders
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A peer of a single torrent swarm.
#[derive(Clone, Debug)]
struct Peer {
    // Socket worker that owns this peer's connection
    pub consumer_id: ConsumerId,
    pub connection_id: ConnectionId,
    // True if the peer's last announce indicated zero bytes left
    pub seeder: bool,
    // When the peer expires unless it announces again
    pub valid_until: ValidUntil,
    // Offers this peer has sent that were relayed to other peers; each
    // entry (receiving peer, offer id) permits exactly one matching answer
    // back, until the entry expires
    pub expecting_answers: IndexMap<ExpectingAnswer, ValidUntil>,
}
|
||||||
|
|
||||||
|
/// Key identifying an answer that a peer is allowed to send: after an offer
/// is relayed to `from_peer_id`, that peer may send back one answer
/// referencing `regarding_offer_id`.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct ExpectingAnswer {
    pub from_peer_id: PeerId,
    pub regarding_offer_id: OfferId,
}
|
||||||
|
|
||||||
|
/// Peer state as derived from an announce request; see
/// `PeerStatus::from_event_and_bytes_left`.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
enum PeerStatus {
    // Announced with zero bytes left
    Seeding,
    // Announced with bytes left to download (or unknown)
    Leeching,
    // Announced with event "stopped"
    Stopped,
}
|
||||||
|
|
||||||
|
impl PeerStatus {
|
||||||
|
/// Determine peer status from announce event and number of bytes left.
|
||||||
|
///
|
||||||
|
/// Likely, the last branch will be taken most of the time.
|
||||||
|
#[inline]
|
||||||
|
fn from_event_and_bytes_left(event: AnnounceEvent, opt_bytes_left: Option<usize>) -> Self {
|
||||||
|
if let AnnounceEvent::Stopped = event {
|
||||||
|
Self::Stopped
|
||||||
|
} else if let Some(0) = opt_bytes_left {
|
||||||
|
Self::Seeding
|
||||||
|
} else {
|
||||||
|
Self::Leeching
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -125,6 +125,7 @@ impl Connection {
|
||||||
&self.load_test_state,
|
&self.load_test_state,
|
||||||
&mut self.rng,
|
&mut self.rng,
|
||||||
self.peer_id,
|
self.peer_id,
|
||||||
|
self.send_answer.is_none(),
|
||||||
);
|
);
|
||||||
|
|
||||||
// If self.send_answer is set and request is announce request, make
|
// If self.send_answer is set and request is announce request, make
|
||||||
|
|
|
||||||
|
|
@ -12,6 +12,7 @@ pub fn create_random_request(
|
||||||
state: &LoadTestState,
|
state: &LoadTestState,
|
||||||
rng: &mut impl Rng,
|
rng: &mut impl Rng,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
|
announce_gen_offers: bool,
|
||||||
) -> InMessage {
|
) -> InMessage {
|
||||||
let weights = [
|
let weights = [
|
||||||
config.torrents.weight_announce as u32,
|
config.torrents.weight_announce as u32,
|
||||||
|
|
@ -23,7 +24,9 @@ pub fn create_random_request(
|
||||||
let dist = WeightedIndex::new(&weights).expect("random request weighted index");
|
let dist = WeightedIndex::new(&weights).expect("random request weighted index");
|
||||||
|
|
||||||
match items[dist.sample(rng)] {
|
match items[dist.sample(rng)] {
|
||||||
RequestType::Announce => create_announce_request(config, state, rng, peer_id),
|
RequestType::Announce => {
|
||||||
|
create_announce_request(config, state, rng, peer_id, announce_gen_offers)
|
||||||
|
}
|
||||||
RequestType::Scrape => create_scrape_request(config, state, rng),
|
RequestType::Scrape => create_scrape_request(config, state, rng),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -34,6 +37,7 @@ fn create_announce_request(
|
||||||
state: &LoadTestState,
|
state: &LoadTestState,
|
||||||
rng: &mut impl Rng,
|
rng: &mut impl Rng,
|
||||||
peer_id: PeerId,
|
peer_id: PeerId,
|
||||||
|
gen_offers: bool,
|
||||||
) -> InMessage {
|
) -> InMessage {
|
||||||
let (event, bytes_left) = {
|
let (event, bytes_left) = {
|
||||||
if rng.gen_bool(config.torrents.peer_seeder_probability) {
|
if rng.gen_bool(config.torrents.peer_seeder_probability) {
|
||||||
|
|
@ -45,6 +49,7 @@ fn create_announce_request(
|
||||||
|
|
||||||
let info_hash_index = select_info_hash_index(config, &state, rng);
|
let info_hash_index = select_info_hash_index(config, &state, rng);
|
||||||
|
|
||||||
|
let offers = if gen_offers {
|
||||||
let mut offers = Vec::with_capacity(config.torrents.offers_per_request);
|
let mut offers = Vec::with_capacity(config.torrents.offers_per_request);
|
||||||
|
|
||||||
for _ in 0..config.torrents.offers_per_request {
|
for _ in 0..config.torrents.offers_per_request {
|
||||||
|
|
@ -57,6 +62,11 @@ fn create_announce_request(
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
offers
|
||||||
|
} else {
|
||||||
|
Vec::new()
|
||||||
|
};
|
||||||
|
|
||||||
InMessage::AnnounceRequest(AnnounceRequest {
|
InMessage::AnnounceRequest(AnnounceRequest {
|
||||||
action: AnnounceAction::Announce,
|
action: AnnounceAction::Announce,
|
||||||
info_hash: state.info_hashes[info_hash_index],
|
info_hash: state.info_hashes[info_hash_index],
|
||||||
|
|
|
||||||
|
|
@ -20,7 +20,7 @@ pub struct InfoHash(
|
||||||
pub [u8; 20],
|
pub [u8; 20],
|
||||||
);
|
);
|
||||||
|
|
||||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)]
|
||||||
#[serde(transparent)]
|
#[serde(transparent)]
|
||||||
pub struct OfferId(
|
pub struct OfferId(
|
||||||
#[serde(
|
#[serde(
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue