From c949bde5324075e80825a7ccaeb614f4eecd8d64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 12 Nov 2021 13:30:50 +0100 Subject: [PATCH 01/56] WIP: udp io-uring experiments --- Cargo.lock | 20 + aquatic_udp/Cargo.toml | 6 +- aquatic_udp/src/lib/mio/mod.rs | 3 +- aquatic_udp/src/lib/mio/network_uring.rs | 521 +++++++++++++++++++++++ 4 files changed, 548 insertions(+), 2 deletions(-) create mode 100644 aquatic_udp/src/lib/mio/network_uring.rs diff --git a/Cargo.lock b/Cargo.lock index a27992b..37e798c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -179,12 +179,15 @@ dependencies = [ "aquatic_cli_helpers", "aquatic_common", "aquatic_udp_protocol", + "bytemuck", "cfg-if", "crossbeam-channel", "futures-lite", "glommio", "hex", "histogram", + "io-uring", + "libc", "log", "mimalloc", "mio", @@ -194,6 +197,7 @@ dependencies = [ "rand", "serde", "signal-hook", + "slab", "socket2 0.4.2", ] @@ -446,6 +450,12 @@ version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9df67f7bf9ef8498769f994239c45613ef0c5899415fb58e9add412d2c1a538" +[[package]] +name = "bytemuck" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b" + [[package]] name = "byteorder" version = "1.4.3" @@ -1130,6 +1140,16 @@ dependencies = [ "memoffset 0.5.6", ] +[[package]] +name = "io-uring" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d75829ed9377bab6c90039fe47b9d84caceb4b5063266142e21bcce6550cda8" +dependencies = [ + "bitflags", + "libc", +] + [[package]] name = "itertools" version = "0.10.1" diff --git a/aquatic_udp/Cargo.toml b/aquatic_udp/Cargo.toml index 7fc5164..7c55a5a 100644 --- a/aquatic_udp/Cargo.toml +++ b/aquatic_udp/Cargo.toml @@ -18,7 +18,7 @@ name = "aquatic_udp" default = ["with-mio"] cpu-pinning = ["aquatic_common/cpu-pinning"] with-glommio = ["cpu-pinning", 
"glommio", "futures-lite"] -with-mio = ["crossbeam-channel", "histogram", "mio", "socket2"] +with-mio = ["crossbeam-channel", "histogram", "mio", "socket2", "io-uring", "libc", "bytemuck"] [dependencies] anyhow = "1" @@ -32,6 +32,7 @@ mimalloc = { version = "0.1", default-features = false } parking_lot = "0.11" rand = { version = "0.8", features = ["small_rng"] } serde = { version = "1", features = ["derive"] } +slab = "0.4" signal-hook = { version = "0.3" } # mio @@ -39,6 +40,9 @@ crossbeam-channel = { version = "0.5", optional = true } histogram = { version = "0.6", optional = true } mio = { version = "0.7", features = ["udp", "os-poll", "os-util"], optional = true } socket2 = { version = "0.4.1", features = ["all"], optional = true } +io-uring = { version = "0.5", optional = true } +libc = { version = "0.2", optional = true } +bytemuck = { version = "1", optional = true } # glommio glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5", optional = true } diff --git a/aquatic_udp/src/lib/mio/mod.rs b/aquatic_udp/src/lib/mio/mod.rs index 0287f28..c7da3e1 100644 --- a/aquatic_udp/src/lib/mio/mod.rs +++ b/aquatic_udp/src/lib/mio/mod.rs @@ -17,6 +17,7 @@ use crate::config::Config; pub mod common; pub mod handlers; pub mod network; +pub mod network_uring; pub mod tasks; use common::State; @@ -98,7 +99,7 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { WorkerIndex::SocketWorker(i), ); - network::run_socket_worker( + network_uring::run_socket_worker( state, config, i, diff --git a/aquatic_udp/src/lib/mio/network_uring.rs b/aquatic_udp/src/lib/mio/network_uring.rs new file mode 100644 index 0000000..aa28fd5 --- /dev/null +++ b/aquatic_udp/src/lib/mio/network_uring.rs @@ -0,0 +1,521 @@ +use std::io::Cursor; +use std::mem::size_of_val; +use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4}; +use std::os::unix::prelude::{AsRawFd}; +use std::ptr::{null_mut}; +use std::sync::{ + 
atomic::{AtomicUsize, Ordering}, + Arc, +}; +use std::time::{Duration, Instant}; + +use aquatic_common::access_list::{AccessListCache, create_access_list_cache}; +use crossbeam_channel::{Receiver, Sender}; +use io_uring::SubmissionQueue; +use io_uring::types::{Fixed, Timespec}; +use libc::{c_void, in_addr, iovec, msghdr, sockaddr_in}; +use rand::prelude::{Rng, SeedableRng, StdRng}; +use slab::Slab; +use socket2::{Domain, Protocol, Socket, Type}; + +use aquatic_udp_protocol::{IpVersion, Request, Response}; + +use crate::common::handlers::*; +use crate::common::network::ConnectionMap; +use crate::common::*; +use crate::config::Config; + +use super::common::*; + +const RING_SIZE: usize = 128; +const MAX_RECV_EVENTS: usize = 1; +const MAX_SEND_EVENTS: usize = RING_SIZE - MAX_RECV_EVENTS - 1; +const NUM_BUFFERS: usize = MAX_RECV_EVENTS + MAX_SEND_EVENTS; + +#[derive(Clone, Copy, Debug, PartialEq)] +enum UserData { + RecvMsg { + slab_key: usize, + }, + SendMsg { + slab_key: usize, + }, + Timeout, +} + +impl UserData { + fn get_buffer_index(&self) -> usize { + match self { + Self::RecvMsg { slab_key } => { + *slab_key + } + Self::SendMsg { slab_key } => { + slab_key + MAX_RECV_EVENTS + } + Self::Timeout => { + unreachable!() + } + } + } +} + +impl From for UserData { + fn from(mut n: u64) -> UserData { + let bytes = bytemuck::bytes_of_mut(&mut n); + + let t = bytes[7]; + + bytes[7] = 0; + + match t { + 0 => Self::RecvMsg { + slab_key: n as usize, + }, + 1 => Self::SendMsg { + slab_key: n as usize, + }, + 2 => Self::Timeout, + _ => unreachable!(), + } + } +} + +impl Into for UserData { + fn into(self) -> u64 { + match self { + Self::RecvMsg { slab_key } => { + let mut out = slab_key as u64; + + bytemuck::bytes_of_mut(&mut out)[7] = 0; + + out + } + Self::SendMsg { slab_key } => { + let mut out = slab_key as u64; + + bytemuck::bytes_of_mut(&mut out)[7] = 1; + + out + } + Self::Timeout => { + let mut out = 0u64; + + bytemuck::bytes_of_mut(&mut out)[7] = 2; + + out + } + } + 
} +} + +pub fn run_socket_worker( + state: State, + config: Config, + token_num: usize, + request_sender: Sender<(ConnectedRequest, SocketAddr)>, + response_receiver: Receiver<(ConnectedResponse, SocketAddr)>, + num_bound_sockets: Arc, +) { + let mut rng = StdRng::from_entropy(); + + let socket = create_socket(&config); + + num_bound_sockets.fetch_add(1, Ordering::SeqCst); + + let mut connections = ConnectionMap::default(); + let mut access_list_cache = create_access_list_cache(&state.access_list); + let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new(); + + let cleaning_duration = Duration::from_secs(config.cleaning.connection_cleaning_interval); + + let mut iter_counter = 0usize; + let mut last_cleaning = Instant::now(); + + let mut buffers: Vec<[u8; MAX_PACKET_SIZE]> = (0..NUM_BUFFERS).map(|_| [0; MAX_PACKET_SIZE]).collect(); + + let mut sockaddrs_ipv4 = [ + sockaddr_in { + sin_addr: in_addr { + s_addr: 0, + }, + sin_port: 0, + sin_family: 0, + sin_zero: Default::default(), + } + ; NUM_BUFFERS + ]; + + let mut iovs: Vec = (0..NUM_BUFFERS).map(|i| { + let iov_base = buffers[i].as_mut_ptr() as *mut c_void; + let iov_len = MAX_PACKET_SIZE; + + iovec { + iov_base, + iov_len, + } + }).collect(); + + let mut msghdrs: Vec = (0..NUM_BUFFERS).map(|i| { + let msg_iov: *mut iovec = &mut iovs[i]; + let msg_name: *mut sockaddr_in = &mut sockaddrs_ipv4[i]; + + msghdr { + msg_name: msg_name as *mut c_void, + msg_namelen: size_of_val(&sockaddrs_ipv4[i]) as u32, + msg_iov, + msg_iovlen: 1, + msg_control: null_mut(), + msg_controllen: 0, + msg_flags: 0, + } + }).collect(); + + let timeout = Timespec::new().nsec(500_000_000); + let mut timeout_set = false; + + let mut recv_entries = Slab::with_capacity(MAX_RECV_EVENTS); + let mut send_entries = Slab::with_capacity(MAX_SEND_EVENTS); + + let mut ring = io_uring::IoUring::new(RING_SIZE as u32).unwrap(); + + let (submitter, mut sq, mut cq) = ring.split(); + + submitter.register_files(&[socket.as_raw_fd()]).unwrap(); + + 
let fd = Fixed(0); + + loop { + while let Some(entry) = cq.next() { + let user_data: UserData = entry.user_data().into(); + + match user_data { + UserData::RecvMsg { slab_key } => { + recv_entries.remove(slab_key); + + let result = entry.result(); + + if result < 0 { + ::log::info!("recvmsg error {}: {:#}", result, ::std::io::Error::from_raw_os_error(-result)); + } else if result == 0 { + ::log::info!("recvmsg error: 0 bytes read"); + } else { + let buffer_index = user_data.get_buffer_index(); + let buffer_len = result as usize; + + let src = SocketAddrV4::new( + Ipv4Addr::from(u32::from_be(sockaddrs_ipv4[buffer_index].sin_addr.s_addr)), + u16::from_be(sockaddrs_ipv4[buffer_index].sin_port), + ); + + let res_request = + Request::from_bytes(&buffers[buffer_index][..buffer_len], config.protocol.max_scrape_torrents); + + handle_request( + &config, + &state, + &mut connections, + &mut access_list_cache, + &mut rng, + &request_sender, + &mut local_responses, + res_request, + SocketAddr::V4(src), + ); + } + } + UserData::SendMsg { slab_key } => { + send_entries.remove(slab_key); + + if entry.result() < 0 { + ::log::info!("recvmsg error: {:#}", ::std::io::Error::from_raw_os_error(-entry.result())); + } + } + UserData::Timeout => { + timeout_set = false; + } + } + } + + for _ in 0..(MAX_RECV_EVENTS - recv_entries.len()) { + let slab_key = recv_entries.insert(()); + let user_data = UserData::RecvMsg { slab_key }; + + let buffer_index = user_data.get_buffer_index(); + + let buf_ptr: *mut msghdr = &mut msghdrs[buffer_index]; + + let entry = io_uring::opcode::RecvMsg::new(fd, buf_ptr).build().user_data(user_data.into()); + + unsafe { + sq.push(&entry).unwrap(); + } + } + + if !timeout_set { + let user_data = UserData::Timeout; + + let timespec_ptr: *const Timespec = &timeout; + + let entry = io_uring::opcode::Timeout::new(timespec_ptr).build().user_data(user_data.into()); + + unsafe { + sq.push(&entry).unwrap(); + } + + timeout_set = true; + } + + let num_local_to_queue = 
(MAX_SEND_EVENTS - send_entries.len()).min(local_responses.len()); + + for (response, addr) in local_responses.drain(local_responses.len() - num_local_to_queue..) { + queue_response(&mut sq, fd, &mut send_entries, &mut buffers, &mut iovs, &mut sockaddrs_ipv4, &mut msghdrs, response, addr); + } + + for (response, addr) in response_receiver.try_iter().take(MAX_SEND_EVENTS - send_entries.len()) { + queue_response(&mut sq, fd, &mut send_entries, &mut buffers, &mut iovs, &mut sockaddrs_ipv4, &mut msghdrs, response.into(), addr); + } + + if iter_counter % 32 == 0 { + let now = Instant::now(); + + if now > last_cleaning + cleaning_duration { + connections.clean(); + + last_cleaning = now; + } + } + + let all_responses_sent = local_responses.is_empty() & response_receiver.is_empty(); + + let wait_for_num = if all_responses_sent { + send_entries.len() + recv_entries.len() + } else { + send_entries.len() + }; + + sq.sync(); + + submitter.submit_and_wait(wait_for_num).unwrap(); + + sq.sync(); + cq.sync(); + + iter_counter = iter_counter.wrapping_add(1); + } +} + +fn queue_response( + sq: &mut SubmissionQueue, + fd: Fixed, + send_events: &mut Slab<()>, + buffers: &mut [[u8; MAX_PACKET_SIZE]], + iovs: &mut [iovec], + sockaddrs: &mut [sockaddr_in], + msghdrs: &mut [msghdr], + response: Response, + src: SocketAddr, +) { + let slab_key = send_events.insert(()); + let user_data = UserData::SendMsg { slab_key }; + + let buffer_index = user_data.get_buffer_index(); + + let mut cursor = Cursor::new(&mut buffers[buffer_index][..]); + + match response.write(&mut cursor, ip_version_from_ip(src.ip())) { + Ok(()) => { + iovs[buffer_index].iov_len = cursor.position() as usize; + + let src = if let SocketAddr::V4(src) = src { + src + } else { + return; // FIXME + }; + + sockaddrs[buffer_index].sin_addr.s_addr = u32::to_be((*src.ip()).into()); + sockaddrs[buffer_index].sin_port = u16::to_be(src.port()); + } + Err(err) => { + ::log::error!("Response::write error: {:?}", err); + } + } + + let 
buf_ptr: *mut msghdr = &mut msghdrs[buffer_index]; + + let entry = io_uring::opcode::SendMsg::new(fd, buf_ptr).build().user_data(user_data.into()); + + unsafe { + sq.push(&entry).unwrap(); + } +} + +fn create_socket(config: &Config) -> ::std::net::UdpSocket { + let socket = if config.network.address.is_ipv4() { + Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) + } else { + Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) + } + .expect("create socket"); + + socket.set_reuse_port(true).expect("socket: set reuse port"); + + socket + .set_nonblocking(true) + .expect("socket: set nonblocking"); + + socket + .bind(&config.network.address.into()) + .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); + + let recv_buffer_size = config.network.socket_recv_buffer_size; + + if recv_buffer_size != 0 { + if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { + ::log::error!( + "socket: failed setting recv buffer to {}: {:?}", + recv_buffer_size, + err + ); + } + } + + socket.into() +} + +#[inline] +fn handle_request( + config: &Config, + state: &State, + connections: &mut ConnectionMap, + access_list_cache: &mut AccessListCache, + rng: &mut StdRng, + request_sender: &Sender<(ConnectedRequest, SocketAddr)>, + local_responses: &mut Vec<(Response, SocketAddr)>, + res_request: Result, + src: SocketAddr, +) { + + let valid_until = ValidUntil::new(config.cleaning.max_connection_age); + let access_list_mode = config.access_list.mode; + + match res_request { + Ok(Request::Connect(request)) => { + let connection_id = ConnectionId(rng.gen()); + + connections.insert(connection_id, src, valid_until); + + let response = Response::Connect(ConnectResponse { + connection_id, + transaction_id: request.transaction_id, + }); + + local_responses.push((response, src)) + } + Ok(Request::Announce(request)) => { + if connections.contains(request.connection_id, src) { + if access_list_cache + .load() + .allows(access_list_mode, 
&request.info_hash.0) + { + if let Err(err) = request_sender + .try_send((ConnectedRequest::Announce(request), src)) + { + ::log::warn!("request_sender.try_send failed: {:?}", err) + } + } else { + let response = Response::Error(ErrorResponse { + transaction_id: request.transaction_id, + message: "Info hash not allowed".into(), + }); + + local_responses.push((response, src)) + } + } + } + Ok(Request::Scrape(request)) => { + if connections.contains(request.connection_id, src) { + let request = ConnectedRequest::Scrape { + request, + original_indices: Vec::new(), + }; + + if let Err(err) = request_sender.try_send((request, src)) { + ::log::warn!("request_sender.try_send failed: {:?}", err) + } + } + } + Err(err) => { + ::log::debug!("Request::from_bytes error: {:?}", err); + + if let RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } = err + { + if connections.contains(connection_id, src) { + let response = ErrorResponse { + transaction_id, + message: err.right_or("Parse error").into(), + }; + + local_responses.push((response.into(), src)); + } + } + } + } + +} + +fn ip_version_from_ip(ip: IpAddr) -> IpVersion { + match ip { + IpAddr::V4(_) => IpVersion::IPv4, + IpAddr::V6(ip) => { + if let [0, 0, 0, 0, 0, 0xffff, ..] 
= ip.segments() { + IpVersion::IPv4 + } else { + IpVersion::IPv6 + } + } + } +} + +#[cfg(test)] +mod tests { + use quickcheck::Arbitrary; + use quickcheck_macros::quickcheck; + + use super::*; + + impl quickcheck::Arbitrary for UserData { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + match (bool::arbitrary(g), bool::arbitrary(g)) { + (false, b) => { + let slab_key: u32 = Arbitrary::arbitrary(g); + let slab_key = slab_key as usize; + + if b { + UserData::RecvMsg { + slab_key + } + } else { + UserData::SendMsg { + slab_key + } + } + } + _ => { + UserData::Timeout + } + } + } + } + + #[quickcheck] + fn test_user_data_identity(a: UserData) -> bool { + let n: u64 = a.into(); + let b = UserData::from(n); + + a == b + } +} \ No newline at end of file From c5916d9633027cbe621032f249d013de0b4bd799 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 00:37:12 +0100 Subject: [PATCH 02/56] udp: uring: add comment --- aquatic_udp/src/lib/mio/network_uring.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/aquatic_udp/src/lib/mio/network_uring.rs b/aquatic_udp/src/lib/mio/network_uring.rs index aa28fd5..93b80bb 100644 --- a/aquatic_udp/src/lib/mio/network_uring.rs +++ b/aquatic_udp/src/lib/mio/network_uring.rs @@ -252,6 +252,7 @@ pub fn run_socket_worker( } if !timeout_set { + // Setup timer to occasionally check if there are pending responses let user_data = UserData::Timeout; let timespec_ptr: *const Timespec = &timeout; From efbf51ba19f679176567dc2f120feca5117ced95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 02:39:51 +0100 Subject: [PATCH 03/56] udp: io-uring: add ipv6 support --- aquatic_udp/src/lib/mio/network_uring.rs | 272 +++++++++++++++-------- 1 file changed, 176 insertions(+), 96 deletions(-) diff --git a/aquatic_udp/src/lib/mio/network_uring.rs b/aquatic_udp/src/lib/mio/network_uring.rs index 93b80bb..078461a 100644 --- a/aquatic_udp/src/lib/mio/network_uring.rs +++ 
b/aquatic_udp/src/lib/mio/network_uring.rs @@ -1,19 +1,19 @@ use std::io::Cursor; -use std::mem::size_of_val; -use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4}; -use std::os::unix::prelude::{AsRawFd}; -use std::ptr::{null_mut}; +use std::mem::size_of; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::os::unix::prelude::AsRawFd; +use std::ptr::null_mut; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; use std::time::{Duration, Instant}; -use aquatic_common::access_list::{AccessListCache, create_access_list_cache}; +use aquatic_common::access_list::{create_access_list_cache, AccessListCache}; use crossbeam_channel::{Receiver, Sender}; -use io_uring::SubmissionQueue; use io_uring::types::{Fixed, Timespec}; -use libc::{c_void, in_addr, iovec, msghdr, sockaddr_in}; +use io_uring::SubmissionQueue; +use libc::{AF_INET, AF_INET6, c_void, in6_addr, in_addr, iovec, msghdr, sockaddr_in, sockaddr_in6}; use rand::prelude::{Rng, SeedableRng, StdRng}; use slab::Slab; use socket2::{Domain, Protocol, Socket, Type}; @@ -34,24 +34,16 @@ const NUM_BUFFERS: usize = MAX_RECV_EVENTS + MAX_SEND_EVENTS; #[derive(Clone, Copy, Debug, PartialEq)] enum UserData { - RecvMsg { - slab_key: usize, - }, - SendMsg { - slab_key: usize, - }, + RecvMsg { slab_key: usize }, + SendMsg { slab_key: usize }, Timeout, } impl UserData { fn get_buffer_index(&self) -> usize { match self { - Self::RecvMsg { slab_key } => { - *slab_key - } - Self::SendMsg { slab_key } => { - slab_key + MAX_RECV_EVENTS - } + Self::RecvMsg { slab_key } => *slab_key, + Self::SendMsg { slab_key } => slab_key + MAX_RECV_EVENTS, Self::Timeout => { unreachable!() } @@ -131,44 +123,62 @@ pub fn run_socket_worker( let mut iter_counter = 0usize; let mut last_cleaning = Instant::now(); - let mut buffers: Vec<[u8; MAX_PACKET_SIZE]> = (0..NUM_BUFFERS).map(|_| [0; MAX_PACKET_SIZE]).collect(); + let mut buffers: Vec<[u8; MAX_PACKET_SIZE]> = + (0..NUM_BUFFERS).map(|_| [0; 
MAX_PACKET_SIZE]).collect(); - let mut sockaddrs_ipv4 = [ - sockaddr_in { - sin_addr: in_addr { - s_addr: 0, - }, - sin_port: 0, - sin_family: 0, - sin_zero: Default::default(), - } - ; NUM_BUFFERS - ]; + let mut sockaddrs_ipv4 = [sockaddr_in { + sin_addr: in_addr { s_addr: 0 }, + sin_port: 0, + sin_family: AF_INET as u16, + sin_zero: Default::default(), + }; NUM_BUFFERS]; - let mut iovs: Vec = (0..NUM_BUFFERS).map(|i| { - let iov_base = buffers[i].as_mut_ptr() as *mut c_void; - let iov_len = MAX_PACKET_SIZE; + let mut sockaddrs_ipv6 = [sockaddr_in6 { + sin6_addr: in6_addr { s6_addr: [0; 16] }, + sin6_port: 0, + sin6_family: AF_INET6 as u16, + sin6_flowinfo: 0, + sin6_scope_id: 0, + }; NUM_BUFFERS]; - iovec { - iov_base, - iov_len, - } - }).collect(); + let mut iovs: Vec = (0..NUM_BUFFERS) + .map(|i| { + let iov_base = buffers[i].as_mut_ptr() as *mut c_void; + let iov_len = MAX_PACKET_SIZE; - let mut msghdrs: Vec = (0..NUM_BUFFERS).map(|i| { - let msg_iov: *mut iovec = &mut iovs[i]; - let msg_name: *mut sockaddr_in = &mut sockaddrs_ipv4[i]; + iovec { iov_base, iov_len } + }) + .collect(); - msghdr { - msg_name: msg_name as *mut c_void, - msg_namelen: size_of_val(&sockaddrs_ipv4[i]) as u32, - msg_iov, - msg_iovlen: 1, - msg_control: null_mut(), - msg_controllen: 0, - msg_flags: 0, - } - }).collect(); + let mut msghdrs: Vec = (0..NUM_BUFFERS) + .map(|i| { + let msg_iov: *mut iovec = &mut iovs[i]; + + let mut msghdr = msghdr { + msg_name: null_mut(), + msg_namelen: 0, + msg_iov, + msg_iovlen: 1, + msg_control: null_mut(), + msg_controllen: 0, + msg_flags: 0, + }; + + if config.network.address.is_ipv4() { + let ptr: *mut sockaddr_in = &mut sockaddrs_ipv4[i]; + + msghdr.msg_name = ptr as *mut c_void; + msghdr.msg_namelen = size_of::() as u32; + } else { + let ptr: *mut sockaddr_in6 = &mut sockaddrs_ipv6[i]; + + msghdr.msg_name = ptr as *mut c_void; + msghdr.msg_namelen = size_of::() as u32; + } + + msghdr + }) + .collect(); let timeout = 
Timespec::new().nsec(500_000_000); let mut timeout_set = false; @@ -195,20 +205,47 @@ pub fn run_socket_worker( let result = entry.result(); if result < 0 { - ::log::info!("recvmsg error {}: {:#}", result, ::std::io::Error::from_raw_os_error(-result)); + ::log::info!( + "recvmsg error {}: {:#}", + result, + ::std::io::Error::from_raw_os_error(-result) + ); } else if result == 0 { ::log::info!("recvmsg error: 0 bytes read"); } else { let buffer_index = user_data.get_buffer_index(); let buffer_len = result as usize; - let src = SocketAddrV4::new( - Ipv4Addr::from(u32::from_be(sockaddrs_ipv4[buffer_index].sin_addr.s_addr)), - u16::from_be(sockaddrs_ipv4[buffer_index].sin_port), - ); + let addr = if config.network.address.is_ipv4() { + SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::from(u32::from_be( + sockaddrs_ipv4[buffer_index].sin_addr.s_addr, + )), + u16::from_be(sockaddrs_ipv4[buffer_index].sin_port), + )) + } else { + let mut octets = sockaddrs_ipv6[buffer_index].sin6_addr.s6_addr; + let port = u16::from_be(sockaddrs_ipv6[buffer_index].sin6_port); - let res_request = - Request::from_bytes(&buffers[buffer_index][..buffer_len], config.protocol.max_scrape_torrents); + for byte in octets.iter_mut() { + *byte = u8::from_be(*byte); + } + + let ip = match octets { + // Convert IPv4-mapped address (available in std but nightly-only) + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + Ipv4Addr::new(a, b, c, d).into() + } + octets => Ipv6Addr::from(octets).into(), + }; + + SocketAddr::new(ip, port) + }; + + let res_request = Request::from_bytes( + &buffers[buffer_index][..buffer_len], + config.protocol.max_scrape_torrents, + ); handle_request( &config, @@ -219,7 +256,7 @@ pub fn run_socket_worker( &request_sender, &mut local_responses, res_request, - SocketAddr::V4(src), + addr, ); } } @@ -227,7 +264,10 @@ pub fn run_socket_worker( send_entries.remove(slab_key); if entry.result() < 0 { - ::log::info!("recvmsg error: {:#}", 
::std::io::Error::from_raw_os_error(-entry.result())); + ::log::info!( + "sendmsg error: {:#}", + ::std::io::Error::from_raw_os_error(-entry.result()) + ); } } UserData::Timeout => { @@ -240,11 +280,11 @@ pub fn run_socket_worker( let slab_key = recv_entries.insert(()); let user_data = UserData::RecvMsg { slab_key }; - let buffer_index = user_data.get_buffer_index(); + let msghdr_ptr: *mut msghdr = &mut msghdrs[user_data.get_buffer_index()]; - let buf_ptr: *mut msghdr = &mut msghdrs[buffer_index]; - - let entry = io_uring::opcode::RecvMsg::new(fd, buf_ptr).build().user_data(user_data.into()); + let entry = io_uring::opcode::RecvMsg::new(fd, msghdr_ptr) + .build() + .user_data(user_data.into()); unsafe { sq.push(&entry).unwrap(); @@ -257,7 +297,9 @@ pub fn run_socket_worker( let timespec_ptr: *const Timespec = &timeout; - let entry = io_uring::opcode::Timeout::new(timespec_ptr).build().user_data(user_data.into()); + let entry = io_uring::opcode::Timeout::new(timespec_ptr) + .build() + .user_data(user_data.into()); unsafe { sq.push(&entry).unwrap(); @@ -268,12 +310,40 @@ pub fn run_socket_worker( let num_local_to_queue = (MAX_SEND_EVENTS - send_entries.len()).min(local_responses.len()); - for (response, addr) in local_responses.drain(local_responses.len() - num_local_to_queue..) { - queue_response(&mut sq, fd, &mut send_entries, &mut buffers, &mut iovs, &mut sockaddrs_ipv4, &mut msghdrs, response, addr); + for (response, addr) in local_responses.drain(local_responses.len() - num_local_to_queue..) 
+ { + queue_response( + &config, + &mut sq, + fd, + &mut send_entries, + &mut buffers, + &mut iovs, + &mut sockaddrs_ipv4, + &mut sockaddrs_ipv6, + &mut msghdrs, + response, + addr, + ); } - for (response, addr) in response_receiver.try_iter().take(MAX_SEND_EVENTS - send_entries.len()) { - queue_response(&mut sq, fd, &mut send_entries, &mut buffers, &mut iovs, &mut sockaddrs_ipv4, &mut msghdrs, response.into(), addr); + for (response, addr) in response_receiver + .try_iter() + .take(MAX_SEND_EVENTS - send_entries.len()) + { + queue_response( + &config, + &mut sq, + fd, + &mut send_entries, + &mut buffers, + &mut iovs, + &mut sockaddrs_ipv4, + &mut sockaddrs_ipv6, + &mut msghdrs, + response.into(), + addr, + ); } if iter_counter % 32 == 0 { @@ -306,15 +376,17 @@ pub fn run_socket_worker( } fn queue_response( + config: &Config, sq: &mut SubmissionQueue, fd: Fixed, send_events: &mut Slab<()>, buffers: &mut [[u8; MAX_PACKET_SIZE]], iovs: &mut [iovec], - sockaddrs: &mut [sockaddr_in], + sockaddrs_ipv4: &mut [sockaddr_in], + sockaddrs_ipv6: &mut [sockaddr_in6], msghdrs: &mut [msghdr], response: Response, - src: SocketAddr, + addr: SocketAddr, ) { let slab_key = send_events.insert(()); let user_data = UserData::SendMsg { slab_key }; @@ -323,27 +395,43 @@ fn queue_response( let mut cursor = Cursor::new(&mut buffers[buffer_index][..]); - match response.write(&mut cursor, ip_version_from_ip(src.ip())) { + match response.write(&mut cursor, ip_version_from_ip(addr.ip())) { Ok(()) => { iovs[buffer_index].iov_len = cursor.position() as usize; - let src = if let SocketAddr::V4(src) = src { - src - } else { - return; // FIXME - }; + if config.network.address.is_ipv4() { + let addr = if let SocketAddr::V4(addr) = addr { + addr + } else { + unreachable!(); + }; - sockaddrs[buffer_index].sin_addr.s_addr = u32::to_be((*src.ip()).into()); - sockaddrs[buffer_index].sin_port = u16::to_be(src.port()); + sockaddrs_ipv4[buffer_index].sin_addr.s_addr = u32::to_be((*addr.ip()).into()); + 
sockaddrs_ipv4[buffer_index].sin_port = u16::to_be(addr.port()); + } else { + let mut octets = match addr { + SocketAddr::V4(addr) => addr.ip().to_ipv6_mapped().octets(), + SocketAddr::V6(addr) => addr.ip().octets(), + }; + + for byte in octets.iter_mut() { + *byte = byte.to_be(); + } + + sockaddrs_ipv6[buffer_index].sin6_addr.s6_addr = octets; + sockaddrs_ipv6[buffer_index].sin6_port = u16::to_be(addr.port()); + } } Err(err) => { ::log::error!("Response::write error: {:?}", err); } } - let buf_ptr: *mut msghdr = &mut msghdrs[buffer_index]; + let msghdr_ptr: *mut msghdr = &mut msghdrs[buffer_index]; - let entry = io_uring::opcode::SendMsg::new(fd, buf_ptr).build().user_data(user_data.into()); + let entry = io_uring::opcode::SendMsg::new(fd, msghdr_ptr) + .build() + .user_data(user_data.into()); unsafe { sq.push(&entry).unwrap(); @@ -395,7 +483,6 @@ fn handle_request( res_request: Result, src: SocketAddr, ) { - let valid_until = ValidUntil::new(config.cleaning.max_connection_age); let access_list_mode = config.access_list.mode; @@ -418,8 +505,8 @@ fn handle_request( .load() .allows(access_list_mode, &request.info_hash.0) { - if let Err(err) = request_sender - .try_send((ConnectedRequest::Announce(request), src)) + if let Err(err) = + request_sender.try_send((ConnectedRequest::Announce(request), src)) { ::log::warn!("request_sender.try_send failed: {:?}", err) } @@ -465,7 +552,6 @@ fn handle_request( } } } - } fn ip_version_from_ip(ip: IpAddr) -> IpVersion { @@ -496,18 +582,12 @@ mod tests { let slab_key = slab_key as usize; if b { - UserData::RecvMsg { - slab_key - } + UserData::RecvMsg { slab_key } } else { - UserData::SendMsg { - slab_key - } + UserData::SendMsg { slab_key } } } - _ => { - UserData::Timeout - } + _ => UserData::Timeout, } } } @@ -519,4 +599,4 @@ mod tests { a == b } -} \ No newline at end of file +} From 18635bf26cf147da24ce8c0f043306d325c2cfdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 02:47:15 
+0100 Subject: [PATCH 04/56] udp: add io-uring implementation --- aquatic_udp/Cargo.toml | 11 ++++++-- aquatic_udp/src/lib/lib.rs | 4 +-- aquatic_udp/src/lib/{mio => other}/common.rs | 0 .../src/lib/{mio => other}/handlers.rs | 2 +- aquatic_udp/src/lib/{mio => other}/mod.rs | 28 +++++++++++++------ .../{mio/network.rs => other/network_mio.rs} | 0 .../src/lib/{mio => other}/network_uring.rs | 7 +++-- aquatic_udp/src/lib/{mio => other}/tasks.rs | 0 aquatic_udp_bench/src/main.rs | 4 +-- 9 files changed, 37 insertions(+), 19 deletions(-) rename aquatic_udp/src/lib/{mio => other}/common.rs (100%) rename aquatic_udp/src/lib/{mio => other}/handlers.rs (99%) rename aquatic_udp/src/lib/{mio => other}/mod.rs (84%) rename aquatic_udp/src/lib/{mio/network.rs => other/network_mio.rs} (100%) rename aquatic_udp/src/lib/{mio => other}/network_uring.rs (99%) rename aquatic_udp/src/lib/{mio => other}/tasks.rs (100%) diff --git a/aquatic_udp/Cargo.toml b/aquatic_udp/Cargo.toml index 7c55a5a..d7a7b90 100644 --- a/aquatic_udp/Cargo.toml +++ b/aquatic_udp/Cargo.toml @@ -18,7 +18,8 @@ name = "aquatic_udp" default = ["with-mio"] cpu-pinning = ["aquatic_common/cpu-pinning"] with-glommio = ["cpu-pinning", "glommio", "futures-lite"] -with-mio = ["crossbeam-channel", "histogram", "mio", "socket2", "io-uring", "libc", "bytemuck"] +with-mio = ["crossbeam-channel", "histogram", "mio", "socket2"] +with-io-uring = ["crossbeam-channel", "histogram", "socket2", "io-uring", "libc", "bytemuck"] [dependencies] anyhow = "1" @@ -35,11 +36,15 @@ serde = { version = "1", features = ["derive"] } slab = "0.4" signal-hook = { version = "0.3" } -# mio +# mio / io-uring crossbeam-channel = { version = "0.5", optional = true } histogram = { version = "0.6", optional = true } -mio = { version = "0.7", features = ["udp", "os-poll", "os-util"], optional = true } socket2 = { version = "0.4.1", features = ["all"], optional = true } + +# mio +mio = { version = "0.7", features = ["udp", "os-poll", "os-util"], optional 
= true } + +# io-uring io-uring = { version = "0.5", optional = true } libc = { version = "0.2", optional = true } bytemuck = { version = "1", optional = true } diff --git a/aquatic_udp/src/lib/lib.rs b/aquatic_udp/src/lib/lib.rs index 34e25a8..2f3c924 100644 --- a/aquatic_udp/src/lib/lib.rs +++ b/aquatic_udp/src/lib/lib.rs @@ -5,7 +5,7 @@ pub mod config; #[cfg(all(feature = "with-glommio", target_os = "linux"))] pub mod glommio; #[cfg(feature = "with-mio")] -pub mod mio; +pub mod other; use config::Config; @@ -16,7 +16,7 @@ pub fn run(config: Config) -> ::anyhow::Result<()> { if #[cfg(all(feature = "with-glommio", target_os = "linux"))] { glommio::run(config) } else { - mio::run(config) + other::run(config) } } } diff --git a/aquatic_udp/src/lib/mio/common.rs b/aquatic_udp/src/lib/other/common.rs similarity index 100% rename from aquatic_udp/src/lib/mio/common.rs rename to aquatic_udp/src/lib/other/common.rs diff --git a/aquatic_udp/src/lib/mio/handlers.rs b/aquatic_udp/src/lib/other/handlers.rs similarity index 99% rename from aquatic_udp/src/lib/mio/handlers.rs rename to aquatic_udp/src/lib/other/handlers.rs index 7019b98..0c2c5f2 100644 --- a/aquatic_udp/src/lib/mio/handlers.rs +++ b/aquatic_udp/src/lib/other/handlers.rs @@ -9,7 +9,7 @@ use aquatic_udp_protocol::*; use crate::common::handlers::*; use crate::config::Config; -use crate::mio::common::*; +use crate::other::common::*; pub fn run_request_worker( state: State, diff --git a/aquatic_udp/src/lib/mio/mod.rs b/aquatic_udp/src/lib/other/mod.rs similarity index 84% rename from aquatic_udp/src/lib/mio/mod.rs rename to aquatic_udp/src/lib/other/mod.rs index c7da3e1..c526492 100644 --- a/aquatic_udp/src/lib/mio/mod.rs +++ b/aquatic_udp/src/lib/other/mod.rs @@ -16,7 +16,7 @@ use crate::config::Config; pub mod common; pub mod handlers; -pub mod network; +pub mod network_mio; pub mod network_uring; pub mod tasks; @@ -99,13 +99,25 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { 
WorkerIndex::SocketWorker(i), ); - network_uring::run_socket_worker( - state, - config, - i, - request_sender, - response_receiver, - num_bound_sockets, + cfg_if::cfg_if!( + if #[cfg(feature = "with-io-uring")] { + network_uring::run_socket_worker( + state, + config, + request_sender, + response_receiver, + num_bound_sockets, + ) + } else if #[cfg(feature = "with-mio")] { + network_mio::run_socket_worker( + state, + config, + i, + request_sender, + response_receiver, + num_bound_sockets, + ) + } ) }) .with_context(|| "spawn socket worker")?; diff --git a/aquatic_udp/src/lib/mio/network.rs b/aquatic_udp/src/lib/other/network_mio.rs similarity index 100% rename from aquatic_udp/src/lib/mio/network.rs rename to aquatic_udp/src/lib/other/network_mio.rs diff --git a/aquatic_udp/src/lib/mio/network_uring.rs b/aquatic_udp/src/lib/other/network_uring.rs similarity index 99% rename from aquatic_udp/src/lib/mio/network_uring.rs rename to aquatic_udp/src/lib/other/network_uring.rs index 078461a..cae1923 100644 --- a/aquatic_udp/src/lib/mio/network_uring.rs +++ b/aquatic_udp/src/lib/other/network_uring.rs @@ -1,6 +1,6 @@ use std::io::Cursor; use std::mem::size_of; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4}; use std::os::unix::prelude::AsRawFd; use std::ptr::null_mut; use std::sync::{ @@ -13,7 +13,9 @@ use aquatic_common::access_list::{create_access_list_cache, AccessListCache}; use crossbeam_channel::{Receiver, Sender}; use io_uring::types::{Fixed, Timespec}; use io_uring::SubmissionQueue; -use libc::{AF_INET, AF_INET6, c_void, in6_addr, in_addr, iovec, msghdr, sockaddr_in, sockaddr_in6}; +use libc::{ + c_void, in6_addr, in_addr, iovec, msghdr, sockaddr_in, sockaddr_in6, AF_INET, AF_INET6, +}; use rand::prelude::{Rng, SeedableRng, StdRng}; use slab::Slab; use socket2::{Domain, Protocol, Socket, Type}; @@ -103,7 +105,6 @@ impl Into for UserData { pub fn 
run_socket_worker( state: State, config: Config, - token_num: usize, request_sender: Sender<(ConnectedRequest, SocketAddr)>, response_receiver: Receiver<(ConnectedResponse, SocketAddr)>, num_bound_sockets: Arc, diff --git a/aquatic_udp/src/lib/mio/tasks.rs b/aquatic_udp/src/lib/other/tasks.rs similarity index 100% rename from aquatic_udp/src/lib/mio/tasks.rs rename to aquatic_udp/src/lib/other/tasks.rs diff --git a/aquatic_udp_bench/src/main.rs b/aquatic_udp_bench/src/main.rs index 28c210e..6d294e2 100644 --- a/aquatic_udp_bench/src/main.rs +++ b/aquatic_udp_bench/src/main.rs @@ -15,8 +15,8 @@ use std::time::Duration; use aquatic_cli_helpers::run_app_with_cli_and_config; use aquatic_udp::common::*; use aquatic_udp::config::Config; -use aquatic_udp::mio::common::*; -use aquatic_udp::mio::handlers; +use aquatic_udp::other::common::*; +use aquatic_udp::other::handlers; use config::BenchConfig; From 5a34bd4b814865c24e624b40a1b24a73f7a9c6d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 02:47:37 +0100 Subject: [PATCH 05/56] udp load test: fix ipv6 issues, improve documentation --- aquatic_udp_load_test/src/common.rs | 22 ++++++++++------------ aquatic_udp_load_test/src/main.rs | 21 +++++++-------------- 2 files changed, 17 insertions(+), 26 deletions(-) diff --git a/aquatic_udp_load_test/src/common.rs b/aquatic_udp_load_test/src/common.rs index 681bec9..98ef435 100644 --- a/aquatic_udp_load_test/src/common.rs +++ b/aquatic_udp_load_test/src/common.rs @@ -17,12 +17,12 @@ pub struct ThreadId(pub u8); #[serde(default)] pub struct Config { /// Server address + /// + /// If you want to send IPv4 requests to a IPv4+IPv6 tracker, put an IPv4 + /// address here. pub server_address: SocketAddr, pub log_level: LogLevel, /// Number of sockets and socket worker threads - /// - /// Sockets will bind to one port each, and with - /// multiple_client_ips = true, additionally to one IP each. 
pub num_socket_workers: u8, /// Number of workers generating requests from responses, as well as /// requests not connected to previous ones. @@ -38,15 +38,14 @@ pub struct Config { #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct NetworkConfig { - /// True means bind to one localhost IP per socket. On macOS, this by - /// default causes all server responses to go to one socket worker. - /// Default option ("true") can cause issues on macOS. + /// True means bind to one localhost IP per socket. /// /// The point of multiple IPs is to possibly cause a better distribution - /// of requests to servers with SO_REUSEPORT option. - pub multiple_client_ips: bool, - /// Use Ipv6 only - pub ipv6_client: bool, + /// of requests to servers with SO_REUSEPORT option, but it doesn't + /// necessarily help. + /// + /// Setting this to true can cause issues on macOS. + pub multiple_client_ipv4s: bool, /// Number of first client port pub first_port: u16, /// Socket worker poll timeout in microseconds @@ -121,8 +120,7 @@ impl Default for Config { impl Default for NetworkConfig { fn default() -> Self { Self { - multiple_client_ips: true, - ipv6_client: false, + multiple_client_ipv4s: false, first_port: 45_000, poll_timeout: 276, poll_event_capacity: 2_877, diff --git a/aquatic_udp_load_test/src/main.rs b/aquatic_udp_load_test/src/main.rs index a65ee35..0b34bb2 100644 --- a/aquatic_udp_load_test/src/main.rs +++ b/aquatic_udp_load_test/src/main.rs @@ -72,25 +72,18 @@ fn run(config: Config) -> ::anyhow::Result<()> { let (sender, receiver) = unbounded(); let port = config.network.first_port + (i as u16); - let addr = if config.network.multiple_client_ips { - let ip = if config.network.ipv6_client { - // FIXME: test ipv6 - Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1 + i as u16).into() - } else { - Ipv4Addr::new(127, 0, 0, 1 + i).into() - }; - - SocketAddr::new(ip, port) + let ip = if config.server_address.is_ipv6() { + Ipv6Addr::LOCALHOST.into() } else { - let ip = if 
config.network.ipv6_client { - Ipv6Addr::LOCALHOST.into() + if config.network.multiple_client_ipv4s { + Ipv4Addr::new(127, 0, 0, 1 + i).into() } else { Ipv4Addr::LOCALHOST.into() - }; - - SocketAddr::new(ip, port) + } }; + let addr = SocketAddr::new(ip, port); + request_senders.push(sender); let config = config.clone(); From a665b38536ac6888b1c7d69eeaaf7d35b72209f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 02:57:19 +0100 Subject: [PATCH 06/56] udp: uring: clean up, improve error handling --- aquatic_udp/src/lib/other/network_uring.rs | 209 +++++++++++---------- 1 file changed, 106 insertions(+), 103 deletions(-) diff --git a/aquatic_udp/src/lib/other/network_uring.rs b/aquatic_udp/src/lib/other/network_uring.rs index cae1923..520fff9 100644 --- a/aquatic_udp/src/lib/other/network_uring.rs +++ b/aquatic_udp/src/lib/other/network_uring.rs @@ -119,11 +119,6 @@ pub fn run_socket_worker( let mut access_list_cache = create_access_list_cache(&state.access_list); let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new(); - let cleaning_duration = Duration::from_secs(config.cleaning.connection_cleaning_interval); - - let mut iter_counter = 0usize; - let mut last_cleaning = Instant::now(); - let mut buffers: Vec<[u8; MAX_PACKET_SIZE]> = (0..NUM_BUFFERS).map(|_| [0; MAX_PACKET_SIZE]).collect(); @@ -195,6 +190,11 @@ pub fn run_socket_worker( let fd = Fixed(0); + let cleaning_duration = Duration::from_secs(config.cleaning.connection_cleaning_interval); + + let mut iter_counter = 0usize; + let mut last_cleaning = Instant::now(); + loop { while let Some(entry) = cq.next() { let user_data: UserData = entry.user_data().into(); @@ -265,7 +265,7 @@ pub fn run_socket_worker( send_entries.remove(slab_key); if entry.result() < 0 { - ::log::info!( + ::log::error!( "sendmsg error: {:#}", ::std::io::Error::from_raw_os_error(-entry.result()) ); @@ -376,103 +376,6 @@ pub fn run_socket_worker( } } -fn queue_response( - config: &Config, 
- sq: &mut SubmissionQueue, - fd: Fixed, - send_events: &mut Slab<()>, - buffers: &mut [[u8; MAX_PACKET_SIZE]], - iovs: &mut [iovec], - sockaddrs_ipv4: &mut [sockaddr_in], - sockaddrs_ipv6: &mut [sockaddr_in6], - msghdrs: &mut [msghdr], - response: Response, - addr: SocketAddr, -) { - let slab_key = send_events.insert(()); - let user_data = UserData::SendMsg { slab_key }; - - let buffer_index = user_data.get_buffer_index(); - - let mut cursor = Cursor::new(&mut buffers[buffer_index][..]); - - match response.write(&mut cursor, ip_version_from_ip(addr.ip())) { - Ok(()) => { - iovs[buffer_index].iov_len = cursor.position() as usize; - - if config.network.address.is_ipv4() { - let addr = if let SocketAddr::V4(addr) = addr { - addr - } else { - unreachable!(); - }; - - sockaddrs_ipv4[buffer_index].sin_addr.s_addr = u32::to_be((*addr.ip()).into()); - sockaddrs_ipv4[buffer_index].sin_port = u16::to_be(addr.port()); - } else { - let mut octets = match addr { - SocketAddr::V4(addr) => addr.ip().to_ipv6_mapped().octets(), - SocketAddr::V6(addr) => addr.ip().octets(), - }; - - for byte in octets.iter_mut() { - *byte = byte.to_be(); - } - - sockaddrs_ipv6[buffer_index].sin6_addr.s6_addr = octets; - sockaddrs_ipv6[buffer_index].sin6_port = u16::to_be(addr.port()); - } - } - Err(err) => { - ::log::error!("Response::write error: {:?}", err); - } - } - - let msghdr_ptr: *mut msghdr = &mut msghdrs[buffer_index]; - - let entry = io_uring::opcode::SendMsg::new(fd, msghdr_ptr) - .build() - .user_data(user_data.into()); - - unsafe { - sq.push(&entry).unwrap(); - } -} - -fn create_socket(config: &Config) -> ::std::net::UdpSocket { - let socket = if config.network.address.is_ipv4() { - Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) - } else { - Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) - } - .expect("create socket"); - - socket.set_reuse_port(true).expect("socket: set reuse port"); - - socket - .set_nonblocking(true) - .expect("socket: set nonblocking"); - 
- socket - .bind(&config.network.address.into()) - .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); - - let recv_buffer_size = config.network.socket_recv_buffer_size; - - if recv_buffer_size != 0 { - if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { - ::log::error!( - "socket: failed setting recv buffer to {}: {:?}", - recv_buffer_size, - err - ); - } - } - - socket.into() -} - -#[inline] fn handle_request( config: &Config, state: &State, @@ -555,6 +458,106 @@ fn handle_request( } } +fn queue_response( + config: &Config, + sq: &mut SubmissionQueue, + fd: Fixed, + send_entries: &mut Slab<()>, + buffers: &mut [[u8; MAX_PACKET_SIZE]], + iovs: &mut [iovec], + sockaddrs_ipv4: &mut [sockaddr_in], + sockaddrs_ipv6: &mut [sockaddr_in6], + msghdrs: &mut [msghdr], + response: Response, + addr: SocketAddr, +) { + let slab_key = send_entries.insert(()); + let user_data = UserData::SendMsg { slab_key }; + + let buffer_index = user_data.get_buffer_index(); + + let mut cursor = Cursor::new(&mut buffers[buffer_index][..]); + + match response.write(&mut cursor, ip_version_from_ip(addr.ip())) { + Ok(()) => { + iovs[buffer_index].iov_len = cursor.position() as usize; + + if config.network.address.is_ipv4() { + let addr = if let SocketAddr::V4(addr) = addr { + addr + } else { + unreachable!(); + }; + + sockaddrs_ipv4[buffer_index].sin_addr.s_addr = u32::to_be((*addr.ip()).into()); + sockaddrs_ipv4[buffer_index].sin_port = u16::to_be(addr.port()); + } else { + let mut octets = match addr { + SocketAddr::V4(addr) => addr.ip().to_ipv6_mapped().octets(), + SocketAddr::V6(addr) => addr.ip().octets(), + }; + + for byte in octets.iter_mut() { + *byte = byte.to_be(); + } + + sockaddrs_ipv6[buffer_index].sin6_addr.s6_addr = octets; + sockaddrs_ipv6[buffer_index].sin6_port = u16::to_be(addr.port()); + } + } + Err(err) => { + ::log::error!("Response::write error: {:?}", err); + + send_entries.remove(slab_key); + + return; + } + } + + let 
msghdr_ptr: *mut msghdr = &mut msghdrs[buffer_index]; + + let entry = io_uring::opcode::SendMsg::new(fd, msghdr_ptr) + .build() + .user_data(user_data.into()); + + unsafe { + sq.push(&entry).unwrap(); + } +} + +fn create_socket(config: &Config) -> ::std::net::UdpSocket { + let socket = if config.network.address.is_ipv4() { + Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) + } else { + Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) + } + .expect("create socket"); + + socket.set_reuse_port(true).expect("socket: set reuse port"); + + socket + .set_nonblocking(true) + .expect("socket: set nonblocking"); + + socket + .bind(&config.network.address.into()) + .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); + + let recv_buffer_size = config.network.socket_recv_buffer_size; + + if recv_buffer_size != 0 { + if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { + ::log::error!( + "socket: failed setting recv buffer to {}: {:?}", + recv_buffer_size, + err + ); + } + } + + socket.into() +} + fn ip_version_from_ip(ip: IpAddr) -> IpVersion { match ip { IpAddr::V4(_) => IpVersion::IPv4, From ce1c0b24c34ef45c041b3e08258a0a9eac4cd79c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 03:04:10 +0100 Subject: [PATCH 07/56] udp: fix build failure; fix scripts/run-aquatic-udp.sh --- aquatic_udp/src/lib/other/mod.rs | 10 ++++++---- scripts/run-aquatic-udp.sh | 6 ++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/aquatic_udp/src/lib/other/mod.rs b/aquatic_udp/src/lib/other/mod.rs index c526492..19e7c31 100644 --- a/aquatic_udp/src/lib/other/mod.rs +++ b/aquatic_udp/src/lib/other/mod.rs @@ -16,7 +16,9 @@ use crate::config::Config; pub mod common; pub mod handlers; +#[cfg(feature = "with-mio")] pub mod network_mio; +#[cfg(feature = "with-io-uring")] pub mod network_uring; pub mod tasks; @@ -107,8 +109,8 @@ pub fn run_inner(config: Config, state: State) -> 
::anyhow::Result<()> { request_sender, response_receiver, num_bound_sockets, - ) - } else if #[cfg(feature = "with-mio")] { + ); + } else { network_mio::run_socket_worker( state, config, @@ -116,9 +118,9 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { request_sender, response_receiver, num_bound_sockets, - ) + ); } - ) + ); }) .with_context(|| "spawn socket worker")?; } diff --git a/scripts/run-aquatic-udp.sh b/scripts/run-aquatic-udp.sh index db41e58..0af7880 100755 --- a/scripts/run-aquatic-udp.sh +++ b/scripts/run-aquatic-udp.sh @@ -2,11 +2,13 @@ . ./scripts/env-native-cpu-without-avx-512 -if [ "$1" != "mio" ] && [ "$1" != "glommio" ]; then - echo "Usage: $0 [mio|glommio] [ARGS]" +if [ "$1" != "mio" ] && [ "$1" != "glommio" ] && [ "$1" != "io-uring" ]; then + echo "Usage: $0 [mio|glommio|io-uring] [ARGS]" else if [ "$1" = "mio" ]; then cargo run --release --bin aquatic_udp -- "${@:2}" + elif [ "$1" = "io-uring" ]; then + cargo run --release --features "with-io-uring" --no-default-features --bin aquatic_udp -- "${@:2}" else cargo run --release --features "with-glommio" --no-default-features --bin aquatic_udp -- "${@:2}" fi From d18117595ee9d2542f58d664f58bcd08886acfb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 03:19:33 +0100 Subject: [PATCH 08/56] udp: move code shared by mio/uring impls to common.rs --- aquatic_udp/src/lib/other/common.rs | 140 +++++++++++++++++++- aquatic_udp/src/lib/other/network_mio.rs | 135 +++----------------- aquatic_udp/src/lib/other/network_uring.rs | 142 ++------------------- 3 files changed, 162 insertions(+), 255 deletions(-) diff --git a/aquatic_udp/src/lib/other/common.rs b/aquatic_udp/src/lib/other/common.rs index bcaff2f..a1f62f8 100644 --- a/aquatic_udp/src/lib/other/common.rs +++ b/aquatic_udp/src/lib/other/common.rs @@ -1,8 +1,17 @@ -use aquatic_common::access_list::AccessListArcSwap; +use aquatic_common::access_list::{AccessListArcSwap, 
AccessListCache}; +use aquatic_udp_protocol::*; +use crossbeam_channel::Sender; use parking_lot::Mutex; -use std::sync::{atomic::AtomicUsize, Arc}; +use rand::{prelude::StdRng, Rng}; +use socket2::{Domain, Protocol, Socket, Type}; +use std::{ + net::{IpAddr, SocketAddr}, + sync::{atomic::AtomicUsize, Arc}, +}; use crate::common::*; +use crate::common::{handlers::ConnectedRequest, network::ConnectionMap}; +use crate::config::Config; #[derive(Default)] pub struct Statistics { @@ -28,3 +37,130 @@ impl Default for State { } } } + +pub fn handle_request( + config: &Config, + connections: &mut ConnectionMap, + access_list_cache: &mut AccessListCache, + rng: &mut StdRng, + request_sender: &Sender<(ConnectedRequest, SocketAddr)>, + local_responses: &mut Vec<(Response, SocketAddr)>, + valid_until: ValidUntil, + res_request: Result, + src: SocketAddr, +) { + let access_list_mode = config.access_list.mode; + + match res_request { + Ok(Request::Connect(request)) => { + let connection_id = ConnectionId(rng.gen()); + + connections.insert(connection_id, src, valid_until); + + let response = Response::Connect(ConnectResponse { + connection_id, + transaction_id: request.transaction_id, + }); + + local_responses.push((response, src)) + } + Ok(Request::Announce(request)) => { + if connections.contains(request.connection_id, src) { + if access_list_cache + .load() + .allows(access_list_mode, &request.info_hash.0) + { + if let Err(err) = + request_sender.try_send((ConnectedRequest::Announce(request), src)) + { + ::log::warn!("request_sender.try_send failed: {:?}", err) + } + } else { + let response = Response::Error(ErrorResponse { + transaction_id: request.transaction_id, + message: "Info hash not allowed".into(), + }); + + local_responses.push((response, src)) + } + } + } + Ok(Request::Scrape(request)) => { + if connections.contains(request.connection_id, src) { + let request = ConnectedRequest::Scrape { + request, + original_indices: Vec::new(), + }; + + if let Err(err) = 
request_sender.try_send((request, src)) { + ::log::warn!("request_sender.try_send failed: {:?}", err) + } + } + } + Err(err) => { + ::log::debug!("Request::from_bytes error: {:?}", err); + + if let RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } = err + { + if connections.contains(connection_id, src) { + let response = ErrorResponse { + transaction_id, + message: err.right_or("Parse error").into(), + }; + + local_responses.push((response.into(), src)); + } + } + } + } +} + +pub fn ip_version_from_ip(ip: IpAddr) -> IpVersion { + match ip { + IpAddr::V4(_) => IpVersion::IPv4, + IpAddr::V6(ip) => { + if let [0, 0, 0, 0, 0, 0xffff, ..] = ip.segments() { + IpVersion::IPv4 + } else { + IpVersion::IPv6 + } + } + } +} + +pub fn create_socket(config: &Config) -> ::std::net::UdpSocket { + let socket = if config.network.address.is_ipv4() { + Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) + } else { + Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) + } + .expect("create socket"); + + socket.set_reuse_port(true).expect("socket: set reuse port"); + + socket + .set_nonblocking(true) + .expect("socket: set nonblocking"); + + socket + .bind(&config.network.address.into()) + .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); + + let recv_buffer_size = config.network.socket_recv_buffer_size; + + if recv_buffer_size != 0 { + if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { + ::log::error!( + "socket: failed setting recv buffer to {}: {:?}", + recv_buffer_size, + err + ); + } + } + + socket.into() +} diff --git a/aquatic_udp/src/lib/other/network_mio.rs b/aquatic_udp/src/lib/other/network_mio.rs index dfe00d6..d04fb2a 100644 --- a/aquatic_udp/src/lib/other/network_mio.rs +++ b/aquatic_udp/src/lib/other/network_mio.rs @@ -1,5 +1,5 @@ use std::io::{Cursor, ErrorKind}; -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::sync::{ atomic::{AtomicUsize, Ordering}, 
Arc, @@ -11,10 +11,9 @@ use aquatic_common::access_list::create_access_list_cache; use crossbeam_channel::{Receiver, Sender}; use mio::net::UdpSocket; use mio::{Events, Interest, Poll, Token}; -use rand::prelude::{Rng, SeedableRng, StdRng}; -use socket2::{Domain, Protocol, Socket, Type}; +use rand::prelude::{SeedableRng, StdRng}; -use aquatic_udp_protocol::{IpVersion, Request, Response}; +use aquatic_udp_protocol::{Request, Response}; use crate::common::handlers::*; use crate::common::network::ConnectionMap; @@ -101,39 +100,6 @@ pub fn run_socket_worker( } } -fn create_socket(config: &Config) -> ::std::net::UdpSocket { - let socket = if config.network.address.is_ipv4() { - Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) - } else { - Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) - } - .expect("create socket"); - - socket.set_reuse_port(true).expect("socket: set reuse port"); - - socket - .set_nonblocking(true) - .expect("socket: set nonblocking"); - - socket - .bind(&config.network.address.into()) - .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); - - let recv_buffer_size = config.network.socket_recv_buffer_size; - - if recv_buffer_size != 0 { - if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { - ::log::error!( - "socket: failed setting recv buffer to {}: {:?}", - recv_buffer_size, - err - ); - } - } - - socket.into() -} - #[inline] fn read_requests( config: &Config, @@ -149,88 +115,32 @@ fn read_requests( let mut bytes_received: usize = 0; let valid_until = ValidUntil::new(config.cleaning.max_connection_age); - let access_list_mode = config.access_list.mode; let mut access_list_cache = create_access_list_cache(&state.access_list); loop { match socket.recv_from(&mut buffer[..]) { Ok((amt, src)) => { - let request = + let res_request = Request::from_bytes(&buffer[..amt], config.protocol.max_scrape_torrents); bytes_received += amt; - if request.is_ok() { + if res_request.is_ok() { 
requests_received += 1; } - match request { - Ok(Request::Connect(request)) => { - let connection_id = ConnectionId(rng.gen()); - - connections.insert(connection_id, src, valid_until); - - let response = Response::Connect(ConnectResponse { - connection_id, - transaction_id: request.transaction_id, - }); - - local_responses.push((response, src)) - } - Ok(Request::Announce(request)) => { - if connections.contains(request.connection_id, src) { - if access_list_cache - .load() - .allows(access_list_mode, &request.info_hash.0) - { - if let Err(err) = request_sender - .try_send((ConnectedRequest::Announce(request), src)) - { - ::log::warn!("request_sender.try_send failed: {:?}", err) - } - } else { - let response = Response::Error(ErrorResponse { - transaction_id: request.transaction_id, - message: "Info hash not allowed".into(), - }); - - local_responses.push((response, src)) - } - } - } - Ok(Request::Scrape(request)) => { - if connections.contains(request.connection_id, src) { - let request = ConnectedRequest::Scrape { - request, - original_indices: Vec::new(), - }; - - if let Err(err) = request_sender.try_send((request, src)) { - ::log::warn!("request_sender.try_send failed: {:?}", err) - } - } - } - Err(err) => { - ::log::debug!("Request::from_bytes error: {:?}", err); - - if let RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } = err - { - if connections.contains(connection_id, src) { - let response = ErrorResponse { - transaction_id, - message: err.right_or("Parse error").into(), - }; - - local_responses.push((response.into(), src)); - } - } - } - } + handle_request( + config, + connections, + &mut access_list_cache, + rng, + request_sender, + local_responses, + valid_until, + res_request, + src, + ); } Err(err) => { if err.kind() == ErrorKind::WouldBlock { @@ -314,16 +224,3 @@ fn send_responses( .fetch_add(bytes_sent, Ordering::SeqCst); } } - -fn ip_version_from_ip(ip: IpAddr) -> IpVersion { - match ip { - IpAddr::V4(_) => 
IpVersion::IPv4, - IpAddr::V6(ip) => { - if let [0, 0, 0, 0, 0, 0xffff, ..] = ip.segments() { - IpVersion::IPv4 - } else { - IpVersion::IPv6 - } - } - } -} diff --git a/aquatic_udp/src/lib/other/network_uring.rs b/aquatic_udp/src/lib/other/network_uring.rs index 520fff9..184e092 100644 --- a/aquatic_udp/src/lib/other/network_uring.rs +++ b/aquatic_udp/src/lib/other/network_uring.rs @@ -1,6 +1,6 @@ use std::io::Cursor; use std::mem::size_of; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4}; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4}; use std::os::unix::prelude::AsRawFd; use std::ptr::null_mut; use std::sync::{ @@ -9,18 +9,17 @@ use std::sync::{ }; use std::time::{Duration, Instant}; -use aquatic_common::access_list::{create_access_list_cache, AccessListCache}; +use aquatic_common::access_list::create_access_list_cache; use crossbeam_channel::{Receiver, Sender}; use io_uring::types::{Fixed, Timespec}; use io_uring::SubmissionQueue; use libc::{ c_void, in6_addr, in_addr, iovec, msghdr, sockaddr_in, sockaddr_in6, AF_INET, AF_INET6, }; -use rand::prelude::{Rng, SeedableRng, StdRng}; +use rand::prelude::{SeedableRng, StdRng}; use slab::Slab; -use socket2::{Domain, Protocol, Socket, Type}; -use aquatic_udp_protocol::{IpVersion, Request, Response}; +use aquatic_udp_protocol::{Request, Response}; use crate::common::handlers::*; use crate::common::network::ConnectionMap; @@ -248,14 +247,17 @@ pub fn run_socket_worker( config.protocol.max_scrape_torrents, ); + // FIXME: don't run every iteration + let valid_until = ValidUntil::new(config.cleaning.max_connection_age); + handle_request( &config, - &state, &mut connections, &mut access_list_cache, &mut rng, &request_sender, &mut local_responses, + valid_until, res_request, addr, ); @@ -376,88 +378,6 @@ pub fn run_socket_worker( } } -fn handle_request( - config: &Config, - state: &State, - connections: &mut ConnectionMap, - access_list_cache: &mut AccessListCache, - rng: &mut StdRng, - 
request_sender: &Sender<(ConnectedRequest, SocketAddr)>, - local_responses: &mut Vec<(Response, SocketAddr)>, - res_request: Result, - src: SocketAddr, -) { - let valid_until = ValidUntil::new(config.cleaning.max_connection_age); - let access_list_mode = config.access_list.mode; - - match res_request { - Ok(Request::Connect(request)) => { - let connection_id = ConnectionId(rng.gen()); - - connections.insert(connection_id, src, valid_until); - - let response = Response::Connect(ConnectResponse { - connection_id, - transaction_id: request.transaction_id, - }); - - local_responses.push((response, src)) - } - Ok(Request::Announce(request)) => { - if connections.contains(request.connection_id, src) { - if access_list_cache - .load() - .allows(access_list_mode, &request.info_hash.0) - { - if let Err(err) = - request_sender.try_send((ConnectedRequest::Announce(request), src)) - { - ::log::warn!("request_sender.try_send failed: {:?}", err) - } - } else { - let response = Response::Error(ErrorResponse { - transaction_id: request.transaction_id, - message: "Info hash not allowed".into(), - }); - - local_responses.push((response, src)) - } - } - } - Ok(Request::Scrape(request)) => { - if connections.contains(request.connection_id, src) { - let request = ConnectedRequest::Scrape { - request, - original_indices: Vec::new(), - }; - - if let Err(err) = request_sender.try_send((request, src)) { - ::log::warn!("request_sender.try_send failed: {:?}", err) - } - } - } - Err(err) => { - ::log::debug!("Request::from_bytes error: {:?}", err); - - if let RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } = err - { - if connections.contains(connection_id, src) { - let response = ErrorResponse { - transaction_id, - message: err.right_or("Parse error").into(), - }; - - local_responses.push((response.into(), src)); - } - } - } - } -} - fn queue_response( config: &Config, sq: &mut SubmissionQueue, @@ -525,52 +445,6 @@ fn queue_response( } } -fn create_socket(config: 
&Config) -> ::std::net::UdpSocket { - let socket = if config.network.address.is_ipv4() { - Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) - } else { - Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) - } - .expect("create socket"); - - socket.set_reuse_port(true).expect("socket: set reuse port"); - - socket - .set_nonblocking(true) - .expect("socket: set nonblocking"); - - socket - .bind(&config.network.address.into()) - .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); - - let recv_buffer_size = config.network.socket_recv_buffer_size; - - if recv_buffer_size != 0 { - if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { - ::log::error!( - "socket: failed setting recv buffer to {}: {:?}", - recv_buffer_size, - err - ); - } - } - - socket.into() -} - -fn ip_version_from_ip(ip: IpAddr) -> IpVersion { - match ip { - IpAddr::V4(_) => IpVersion::IPv4, - IpAddr::V6(ip) => { - if let [0, 0, 0, 0, 0, 0xffff, ..] = ip.segments() { - IpVersion::IPv4 - } else { - IpVersion::IPv6 - } - } - } -} - #[cfg(test)] mod tests { use quickcheck::Arbitrary; From 1e9376302d9068a33828dbcaf58fbb79f1ca1109 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 03:28:41 +0100 Subject: [PATCH 09/56] udp: fix some config feature issues --- aquatic_udp/src/lib/config.rs | 16 ++++++++-------- aquatic_udp/src/lib/lib.rs | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index 1e93d0e..45c150d 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -18,9 +18,9 @@ pub struct Config { pub log_level: LogLevel, pub network: NetworkConfig, pub protocol: ProtocolConfig, - #[cfg(feature = "with-mio")] + #[cfg(any(feature = "with-mio", feature = "with-io-uring"))] pub handlers: HandlerConfig, - #[cfg(feature = "with-mio")] + #[cfg(any(feature = "with-mio", feature = "with-io-uring"))] pub 
statistics: StatisticsConfig, pub cleaning: CleaningConfig, pub privileges: PrivilegeConfig, @@ -70,7 +70,7 @@ pub struct ProtocolConfig { pub peer_announce_interval: i32, } -#[cfg(feature = "with-mio")] +#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct HandlerConfig { @@ -80,7 +80,7 @@ pub struct HandlerConfig { pub channel_recv_timeout_microseconds: u64, } -#[cfg(feature = "with-mio")] +#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct StatisticsConfig { @@ -109,9 +109,9 @@ impl Default for Config { log_level: LogLevel::Error, network: NetworkConfig::default(), protocol: ProtocolConfig::default(), - #[cfg(feature = "with-mio")] + #[cfg(any(feature = "with-mio", feature = "with-io-uring"))] handlers: HandlerConfig::default(), - #[cfg(feature = "with-mio")] + #[cfg(any(feature = "with-mio", feature = "with-io-uring"))] statistics: StatisticsConfig::default(), cleaning: CleaningConfig::default(), privileges: PrivilegeConfig::default(), @@ -143,7 +143,7 @@ impl Default for ProtocolConfig { } } -#[cfg(feature = "with-mio")] +#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] impl Default for HandlerConfig { fn default() -> Self { Self { @@ -153,7 +153,7 @@ impl Default for HandlerConfig { } } -#[cfg(feature = "with-mio")] +#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] impl Default for StatisticsConfig { fn default() -> Self { Self { interval: 0 } diff --git a/aquatic_udp/src/lib/lib.rs b/aquatic_udp/src/lib/lib.rs index 2f3c924..cc4403d 100644 --- a/aquatic_udp/src/lib/lib.rs +++ b/aquatic_udp/src/lib/lib.rs @@ -4,7 +4,7 @@ pub mod common; pub mod config; #[cfg(all(feature = "with-glommio", target_os = "linux"))] pub mod glommio; -#[cfg(feature = "with-mio")] +#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] pub mod other; use config::Config; From 
7b20942d0ffc0d98fc257744f82dba0397e36e44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 03:31:59 +0100 Subject: [PATCH 10/56] Update TODO --- TODO.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/TODO.md b/TODO.md index 6f04a76..50b9bca 100644 --- a/TODO.md +++ b/TODO.md @@ -17,13 +17,20 @@ * cargo-deny * aquatic_udp + * uring + * ValidUntil periodic update + * statistics + * ipv6_only + * shared config keys such as poll interval + * mio + * stagger connection cleaning intervals? + * ipv4-mapped addresses + * ipv6_only * glommio * consider sending local responses immediately * consider adding ConnectedScrapeRequest::Scrape(PendingScrapeRequest) containing TransactionId and BTreeMap, and same for response - * mio - * stagger connection cleaning intervals? * aquatic_http: * clean out connections regularly From 4641dd29f222ccc32e8a48291b0f3a41c212962d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 21:59:06 +0100 Subject: [PATCH 11/56] udp: remove glommio implementation --- Cargo.lock | 3 +- aquatic_udp/Cargo.toml | 17 +- aquatic_udp/src/lib/common/mod.rs | 76 +++- aquatic_udp/src/lib/common/network.rs | 123 ++++- aquatic_udp/src/lib/config.rs | 8 - aquatic_udp/src/lib/glommio/common.rs | 8 - aquatic_udp/src/lib/glommio/handlers.rs | 117 ----- aquatic_udp/src/lib/glommio/mod.rs | 135 ------ aquatic_udp/src/lib/glommio/network.rs | 428 ------------------ aquatic_udp/src/lib/{common => }/handlers.rs | 113 ++++- aquatic_udp/src/lib/lib.rs | 175 ++++++- .../src/lib/{other => }/network_mio.rs | 6 +- .../src/lib/{other => }/network_uring.rs | 5 +- aquatic_udp/src/lib/other/common.rs | 166 ------- aquatic_udp/src/lib/other/handlers.rs | 98 ---- aquatic_udp/src/lib/other/mod.rs | 172 ------- aquatic_udp/src/lib/{other => }/tasks.rs | 0 aquatic_udp_bench/Cargo.toml | 1 + aquatic_udp_bench/src/announce.rs | 2 +- aquatic_udp_bench/src/main.rs | 6 +- 
aquatic_udp_bench/src/scrape.rs | 2 +- scripts/run-aquatic-udp.sh | 6 +- 22 files changed, 465 insertions(+), 1202 deletions(-) delete mode 100644 aquatic_udp/src/lib/glommio/common.rs delete mode 100644 aquatic_udp/src/lib/glommio/handlers.rs delete mode 100644 aquatic_udp/src/lib/glommio/mod.rs delete mode 100644 aquatic_udp/src/lib/glommio/network.rs rename aquatic_udp/src/lib/{common => }/handlers.rs (68%) rename aquatic_udp/src/lib/{other => }/network_mio.rs (98%) rename aquatic_udp/src/lib/{other => }/network_uring.rs (99%) delete mode 100644 aquatic_udp/src/lib/other/common.rs delete mode 100644 aquatic_udp/src/lib/other/handlers.rs delete mode 100644 aquatic_udp/src/lib/other/mod.rs rename aquatic_udp/src/lib/{other => }/tasks.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 37e798c..62ea631 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -182,8 +182,6 @@ dependencies = [ "bytemuck", "cfg-if", "crossbeam-channel", - "futures-lite", - "glommio", "hex", "histogram", "io-uring", @@ -208,6 +206,7 @@ dependencies = [ "anyhow", "aquatic_cli_helpers", "aquatic_udp", + "aquatic_udp_protocol", "crossbeam-channel", "indicatif", "mimalloc", diff --git a/aquatic_udp/Cargo.toml b/aquatic_udp/Cargo.toml index d7a7b90..f2756e4 100644 --- a/aquatic_udp/Cargo.toml +++ b/aquatic_udp/Cargo.toml @@ -17,9 +17,8 @@ name = "aquatic_udp" [features] default = ["with-mio"] cpu-pinning = ["aquatic_common/cpu-pinning"] -with-glommio = ["cpu-pinning", "glommio", "futures-lite"] -with-mio = ["crossbeam-channel", "histogram", "mio", "socket2"] -with-io-uring = ["crossbeam-channel", "histogram", "socket2", "io-uring", "libc", "bytemuck"] +with-mio = ["mio"] +with-io-uring = ["io-uring", "libc", "bytemuck"] [dependencies] anyhow = "1" @@ -27,7 +26,9 @@ aquatic_cli_helpers = "0.1.0" aquatic_common = "0.1.0" aquatic_udp_protocol = "0.1.0" cfg-if = "1" +crossbeam-channel = "0.5" hex = "0.4" +histogram = "0.6" log = "0.4" mimalloc = { version = "0.1", default-features = false } parking_lot = 
"0.11" @@ -35,11 +36,7 @@ rand = { version = "0.8", features = ["small_rng"] } serde = { version = "1", features = ["derive"] } slab = "0.4" signal-hook = { version = "0.3" } - -# mio / io-uring -crossbeam-channel = { version = "0.5", optional = true } -histogram = { version = "0.6", optional = true } -socket2 = { version = "0.4.1", features = ["all"], optional = true } +socket2 = { version = "0.4.1", features = ["all"] } # mio mio = { version = "0.7", features = ["udp", "os-poll", "os-util"], optional = true } @@ -49,10 +46,6 @@ io-uring = { version = "0.5", optional = true } libc = { version = "0.2", optional = true } bytemuck = { version = "1", optional = true } -# glommio -glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5", optional = true } -futures-lite = { version = "1", optional = true } - [dev-dependencies] quickcheck = "1.0" quickcheck_macros = "1.0" diff --git a/aquatic_udp/src/lib/common/mod.rs b/aquatic_udp/src/lib/common/mod.rs index ea55cce..8a29d21 100644 --- a/aquatic_udp/src/lib/common/mod.rs +++ b/aquatic_udp/src/lib/common/mod.rs @@ -1,21 +1,52 @@ use std::hash::Hash; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::sync::atomic::AtomicUsize; use std::sync::Arc; use std::time::Instant; +use parking_lot::Mutex; +use socket2::{Domain, Protocol, Socket, Type}; + use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap}; use aquatic_common::AHashIndexMap; - -pub use aquatic_common::{access_list::AccessList, ValidUntil}; -pub use aquatic_udp_protocol::*; +use aquatic_common::ValidUntil; +use aquatic_udp_protocol::*; use crate::config::Config; -pub mod handlers; pub mod network; pub const MAX_PACKET_SIZE: usize = 8192; +#[derive(Debug)] +pub enum ConnectedRequest { + Announce(AnnounceRequest), + Scrape { + request: ScrapeRequest, + /// Currently only used by glommio implementation + original_indices: Vec, + }, +} + +#[derive(Debug)] +pub enum ConnectedResponse { + 
Announce(AnnounceResponse), + Scrape { + response: ScrapeResponse, + /// Currently only used by glommio implementation + original_indices: Vec, + }, +} + +impl Into for ConnectedResponse { + fn into(self) -> Response { + match self { + Self::Announce(response) => Response::Announce(response), + Self::Scrape { response, .. } => Response::Scrape(response), + } + } +} + pub trait Ip: Hash + PartialEq + Eq + Clone + Copy { fn ip_addr(self) -> IpAddr; } @@ -160,6 +191,43 @@ impl TorrentMaps { } } +#[derive(Default)] +pub struct Statistics { + pub requests_received: AtomicUsize, + pub responses_sent: AtomicUsize, + pub bytes_received: AtomicUsize, + pub bytes_sent: AtomicUsize, +} + +#[derive(Clone)] +pub struct State { + pub access_list: Arc, + pub torrents: Arc>, + pub statistics: Arc, +} + +impl Default for State { + fn default() -> Self { + Self { + access_list: Arc::new(AccessListArcSwap::default()), + torrents: Arc::new(Mutex::new(TorrentMaps::default())), + statistics: Arc::new(Statistics::default()), + } + } +} +pub fn ip_version_from_ip(ip: IpAddr) -> IpVersion { + match ip { + IpAddr::V4(_) => IpVersion::IPv4, + IpAddr::V6(ip) => { + if let [0, 0, 0, 0, 0, 0xffff, ..] 
= ip.segments() { + IpVersion::IPv4 + } else { + IpVersion::IPv6 + } + } + } +} + #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv6Addr}; diff --git a/aquatic_udp/src/lib/common/network.rs b/aquatic_udp/src/lib/common/network.rs index e0cd81e..ca3edaf 100644 --- a/aquatic_udp/src/lib/common/network.rs +++ b/aquatic_udp/src/lib/common/network.rs @@ -1,8 +1,13 @@ use std::{net::SocketAddr, time::Instant}; +use aquatic_common::access_list::AccessListCache; use aquatic_common::AHashIndexMap; -pub use aquatic_common::{access_list::AccessList, ValidUntil}; -pub use aquatic_udp_protocol::*; +use aquatic_common::ValidUntil; +use aquatic_udp_protocol::*; +use crossbeam_channel::Sender; +use rand::{prelude::StdRng, Rng}; + +use crate::common::*; #[derive(Default)] pub struct ConnectionMap(AHashIndexMap<(ConnectionId, SocketAddr), ValidUntil>); @@ -28,3 +33,117 @@ impl ConnectionMap { self.0.shrink_to_fit(); } } + +pub fn handle_request( + config: &Config, + connections: &mut ConnectionMap, + access_list_cache: &mut AccessListCache, + rng: &mut StdRng, + request_sender: &Sender<(ConnectedRequest, SocketAddr)>, + local_responses: &mut Vec<(Response, SocketAddr)>, + valid_until: ValidUntil, + res_request: Result, + src: SocketAddr, +) { + let access_list_mode = config.access_list.mode; + + match res_request { + Ok(Request::Connect(request)) => { + let connection_id = ConnectionId(rng.gen()); + + connections.insert(connection_id, src, valid_until); + + let response = Response::Connect(ConnectResponse { + connection_id, + transaction_id: request.transaction_id, + }); + + local_responses.push((response, src)) + } + Ok(Request::Announce(request)) => { + if connections.contains(request.connection_id, src) { + if access_list_cache + .load() + .allows(access_list_mode, &request.info_hash.0) + { + if let Err(err) = + request_sender.try_send((ConnectedRequest::Announce(request), src)) + { + ::log::warn!("request_sender.try_send failed: {:?}", err) + } + } else { + let response = 
Response::Error(ErrorResponse { + transaction_id: request.transaction_id, + message: "Info hash not allowed".into(), + }); + + local_responses.push((response, src)) + } + } + } + Ok(Request::Scrape(request)) => { + if connections.contains(request.connection_id, src) { + let request = ConnectedRequest::Scrape { + request, + original_indices: Vec::new(), + }; + + if let Err(err) = request_sender.try_send((request, src)) { + ::log::warn!("request_sender.try_send failed: {:?}", err) + } + } + } + Err(err) => { + ::log::debug!("Request::from_bytes error: {:?}", err); + + if let RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } = err + { + if connections.contains(connection_id, src) { + let response = ErrorResponse { + transaction_id, + message: err.right_or("Parse error").into(), + }; + + local_responses.push((response.into(), src)); + } + } + } + } +} + +pub fn create_socket(config: &Config) -> ::std::net::UdpSocket { + let socket = if config.network.address.is_ipv4() { + Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) + } else { + Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) + } + .expect("create socket"); + + socket.set_reuse_port(true).expect("socket: set reuse port"); + + socket + .set_nonblocking(true) + .expect("socket: set nonblocking"); + + socket + .bind(&config.network.address.into()) + .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); + + let recv_buffer_size = config.network.socket_recv_buffer_size; + + if recv_buffer_size != 0 { + if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { + ::log::error!( + "socket: failed setting recv buffer to {}: {:?}", + recv_buffer_size, + err + ); + } + } + + socket.into() +} diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index 45c150d..f6c0247 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -18,9 +18,7 @@ pub struct Config { pub log_level: LogLevel, pub 
network: NetworkConfig, pub protocol: ProtocolConfig, - #[cfg(any(feature = "with-mio", feature = "with-io-uring"))] pub handlers: HandlerConfig, - #[cfg(any(feature = "with-mio", feature = "with-io-uring"))] pub statistics: StatisticsConfig, pub cleaning: CleaningConfig, pub privileges: PrivilegeConfig, @@ -70,7 +68,6 @@ pub struct ProtocolConfig { pub peer_announce_interval: i32, } -#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct HandlerConfig { @@ -80,7 +77,6 @@ pub struct HandlerConfig { pub channel_recv_timeout_microseconds: u64, } -#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct StatisticsConfig { @@ -109,9 +105,7 @@ impl Default for Config { log_level: LogLevel::Error, network: NetworkConfig::default(), protocol: ProtocolConfig::default(), - #[cfg(any(feature = "with-mio", feature = "with-io-uring"))] handlers: HandlerConfig::default(), - #[cfg(any(feature = "with-mio", feature = "with-io-uring"))] statistics: StatisticsConfig::default(), cleaning: CleaningConfig::default(), privileges: PrivilegeConfig::default(), @@ -143,7 +137,6 @@ impl Default for ProtocolConfig { } } -#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] impl Default for HandlerConfig { fn default() -> Self { Self { @@ -153,7 +146,6 @@ impl Default for HandlerConfig { } } -#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] impl Default for StatisticsConfig { fn default() -> Self { Self { interval: 0 } diff --git a/aquatic_udp/src/lib/glommio/common.rs b/aquatic_udp/src/lib/glommio/common.rs deleted file mode 100644 index 3506b09..0000000 --- a/aquatic_udp/src/lib/glommio/common.rs +++ /dev/null @@ -1,8 +0,0 @@ -use std::sync::Arc; - -use aquatic_common::access_list::AccessListArcSwap; - -#[derive(Default, Clone)] -pub struct State { - pub access_list: Arc, -} diff --git 
a/aquatic_udp/src/lib/glommio/handlers.rs b/aquatic_udp/src/lib/glommio/handlers.rs deleted file mode 100644 index 55adc4a..0000000 --- a/aquatic_udp/src/lib/glommio/handlers.rs +++ /dev/null @@ -1,117 +0,0 @@ -use std::cell::RefCell; -use std::net::SocketAddr; -use std::rc::Rc; -use std::time::Duration; - -use futures_lite::{Stream, StreamExt}; -use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders}; -use glommio::timer::TimerActionRepeat; -use glommio::{enclose, prelude::*}; -use rand::prelude::SmallRng; -use rand::SeedableRng; - -use crate::common::handlers::handle_announce_request; -use crate::common::handlers::*; -use crate::common::*; -use crate::config::Config; - -use super::common::State; - -pub async fn run_request_worker( - config: Config, - state: State, - request_mesh_builder: MeshBuilder<(usize, ConnectedRequest, SocketAddr), Partial>, - response_mesh_builder: MeshBuilder<(ConnectedResponse, SocketAddr), Partial>, -) { - let (_, mut request_receivers) = request_mesh_builder.join(Role::Consumer).await.unwrap(); - let (response_senders, _) = response_mesh_builder.join(Role::Producer).await.unwrap(); - let response_senders = Rc::new(response_senders); - - let torrents = Rc::new(RefCell::new(TorrentMaps::default())); - - // Periodically clean torrents - TimerActionRepeat::repeat(enclose!((config, torrents, state) move || { - enclose!((config, torrents, state) move || async move { - torrents.borrow_mut().clean(&config, &state.access_list); - - Some(Duration::from_secs(config.cleaning.torrent_cleaning_interval)) - })() - })); - - let mut handles = Vec::new(); - - for (_, receiver) in request_receivers.streams() { - let handle = spawn_local(handle_request_stream( - config.clone(), - torrents.clone(), - response_senders.clone(), - receiver, - )) - .detach(); - - handles.push(handle); - } - - for handle in handles { - handle.await; - } -} - -async fn handle_request_stream( - config: Config, - torrents: Rc>, - response_senders: Rc>, - mut 
stream: S, -) where - S: Stream + ::std::marker::Unpin, -{ - let mut rng = SmallRng::from_entropy(); - - let max_peer_age = config.cleaning.max_peer_age; - let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(max_peer_age))); - - TimerActionRepeat::repeat(enclose!((peer_valid_until) move || { - enclose!((peer_valid_until) move || async move { - *peer_valid_until.borrow_mut() = ValidUntil::new(max_peer_age); - - Some(Duration::from_secs(1)) - })() - })); - - while let Some((producer_index, request, src)) = stream.next().await { - let response = match request { - ConnectedRequest::Announce(request) => { - ConnectedResponse::Announce(handle_announce_request( - &config, - &mut rng, - &mut torrents.borrow_mut(), - request, - src, - peer_valid_until.borrow().to_owned(), - )) - } - ConnectedRequest::Scrape { - request, - original_indices, - } => { - let response = handle_scrape_request(&mut torrents.borrow_mut(), src, request); - - ConnectedResponse::Scrape { - response, - original_indices, - } - } - }; - - ::log::debug!("preparing to send response to channel: {:?}", response); - - if let Err(err) = response_senders - .send_to(producer_index, (response, src)) - .await - { - ::log::error!("response_sender.send: {:?}", err); - } - - yield_if_needed().await; - } -} diff --git a/aquatic_udp/src/lib/glommio/mod.rs b/aquatic_udp/src/lib/glommio/mod.rs deleted file mode 100644 index 8cc8d27..0000000 --- a/aquatic_udp/src/lib/glommio/mod.rs +++ /dev/null @@ -1,135 +0,0 @@ -use std::sync::{atomic::AtomicUsize, Arc}; - -use aquatic_common::access_list::update_access_list; -use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex}; -use aquatic_common::privileges::drop_privileges_after_socket_binding; -use glommio::channels::channel_mesh::MeshBuilder; -use glommio::prelude::*; -use signal_hook::consts::SIGUSR1; -use signal_hook::iterator::Signals; - -use crate::config::Config; - -use self::common::State; - -mod common; -pub mod handlers; -pub mod network; - 
-pub const SHARED_CHANNEL_SIZE: usize = 4096; - -pub fn run(config: Config) -> ::anyhow::Result<()> { - let state = State::default(); - - update_access_list(&config.access_list, &state.access_list)?; - - let mut signals = Signals::new(::std::iter::once(SIGUSR1))?; - - { - let config = config.clone(); - let state = state.clone(); - - ::std::thread::spawn(move || run_inner(config, state)); - } - - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::Other, - ); - - for signal in &mut signals { - match signal { - SIGUSR1 => { - let _ = update_access_list(&config.access_list, &state.access_list); - } - _ => unreachable!(), - } - } - - Ok(()) -} - -pub fn run_inner(config: Config, state: State) -> anyhow::Result<()> { - let num_peers = config.socket_workers + config.request_workers; - - let request_mesh_builder = MeshBuilder::partial(num_peers, SHARED_CHANNEL_SIZE); - let response_mesh_builder = MeshBuilder::partial(num_peers, SHARED_CHANNEL_SIZE); - - let num_bound_sockets = Arc::new(AtomicUsize::new(0)); - - let mut executors = Vec::new(); - - for i in 0..(config.socket_workers) { - let config = config.clone(); - let state = state.clone(); - let request_mesh_builder = request_mesh_builder.clone(); - let response_mesh_builder = response_mesh_builder.clone(); - let num_bound_sockets = num_bound_sockets.clone(); - - let builder = LocalExecutorBuilder::default().name("socket"); - - let executor = builder.spawn(move || async move { - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::SocketWorker(i), - ); - - network::run_socket_worker( - config, - state, - request_mesh_builder, - response_mesh_builder, - num_bound_sockets, - ) - .await - }); - - executors.push(executor); - } - - for i in 0..(config.request_workers) { - let config = config.clone(); - let state = state.clone(); - let request_mesh_builder = request_mesh_builder.clone(); - let response_mesh_builder = 
response_mesh_builder.clone(); - - let builder = LocalExecutorBuilder::default().name("request"); - - let executor = builder.spawn(move || async move { - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::RequestWorker(i), - ); - - handlers::run_request_worker(config, state, request_mesh_builder, response_mesh_builder) - .await - }); - - executors.push(executor); - } - - drop_privileges_after_socket_binding( - &config.privileges, - num_bound_sockets, - config.socket_workers, - ) - .unwrap(); - - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::Other, - ); - - for executor in executors { - executor - .expect("failed to spawn local executor") - .join() - .unwrap(); - } - - Ok(()) -} diff --git a/aquatic_udp/src/lib/glommio/network.rs b/aquatic_udp/src/lib/glommio/network.rs deleted file mode 100644 index f8eb462..0000000 --- a/aquatic_udp/src/lib/glommio/network.rs +++ /dev/null @@ -1,428 +0,0 @@ -use std::cell::RefCell; -use std::collections::BTreeMap; -use std::io::Cursor; -use std::net::{IpAddr, SocketAddr}; -use std::rc::Rc; -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; -use std::time::{Duration, Instant}; - -use aquatic_common::access_list::create_access_list_cache; -use aquatic_common::AHashIndexMap; -use futures_lite::{Stream, StreamExt}; -use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders}; -use glommio::channels::local_channel::{new_unbounded, LocalSender}; -use glommio::enclose; -use glommio::net::UdpSocket; -use glommio::prelude::*; -use glommio::timer::TimerActionRepeat; -use rand::prelude::{Rng, SeedableRng, StdRng}; - -use aquatic_udp_protocol::{IpVersion, Request, Response}; - -use super::common::State; - -use crate::common::handlers::*; -use crate::common::network::ConnectionMap; -use crate::common::*; -use crate::config::Config; - -const PENDING_SCRAPE_MAX_WAIT: u64 = 30; - -struct PendingScrapeResponse { - 
pending_worker_responses: usize, - valid_until: ValidUntil, - stats: BTreeMap, -} - -#[derive(Default)] -struct PendingScrapeResponses(AHashIndexMap); - -impl PendingScrapeResponses { - fn prepare( - &mut self, - transaction_id: TransactionId, - pending_worker_responses: usize, - valid_until: ValidUntil, - ) { - let pending = PendingScrapeResponse { - pending_worker_responses, - valid_until, - stats: BTreeMap::new(), - }; - - self.0.insert(transaction_id, pending); - } - - fn add_and_get_finished( - &mut self, - mut response: ScrapeResponse, - mut original_indices: Vec, - ) -> Option { - let finished = if let Some(r) = self.0.get_mut(&response.transaction_id) { - r.pending_worker_responses -= 1; - - r.stats.extend( - original_indices - .drain(..) - .zip(response.torrent_stats.drain(..)), - ); - - r.pending_worker_responses == 0 - } else { - ::log::warn!("PendingScrapeResponses.add didn't find PendingScrapeResponse in map"); - - false - }; - - if finished { - let PendingScrapeResponse { stats, .. 
} = - self.0.remove(&response.transaction_id).unwrap(); - - Some(ScrapeResponse { - transaction_id: response.transaction_id, - torrent_stats: stats.into_values().collect(), - }) - } else { - None - } - } - - fn clean(&mut self) { - let now = Instant::now(); - - self.0.retain(|_, v| v.valid_until.0 > now); - self.0.shrink_to_fit(); - } -} - -pub async fn run_socket_worker( - config: Config, - state: State, - request_mesh_builder: MeshBuilder<(usize, ConnectedRequest, SocketAddr), Partial>, - response_mesh_builder: MeshBuilder<(ConnectedResponse, SocketAddr), Partial>, - num_bound_sockets: Arc, -) { - let (local_sender, local_receiver) = new_unbounded(); - - let mut socket = UdpSocket::bind(config.network.address).unwrap(); - - let recv_buffer_size = config.network.socket_recv_buffer_size; - - if recv_buffer_size != 0 { - socket.set_buffer_size(recv_buffer_size); - } - - let socket = Rc::new(socket); - - num_bound_sockets.fetch_add(1, Ordering::SeqCst); - - let (request_senders, _) = request_mesh_builder.join(Role::Producer).await.unwrap(); - let (_, mut response_receivers) = response_mesh_builder.join(Role::Consumer).await.unwrap(); - - let response_consumer_index = response_receivers.consumer_id().unwrap(); - - let pending_scrape_responses = Rc::new(RefCell::new(PendingScrapeResponses::default())); - - // Periodically clean pending_scrape_responses - TimerActionRepeat::repeat(enclose!((pending_scrape_responses) move || { - enclose!((pending_scrape_responses) move || async move { - pending_scrape_responses.borrow_mut().clean(); - - Some(Duration::from_secs(120)) - })() - })); - - spawn_local(enclose!((pending_scrape_responses) read_requests( - config.clone(), - state, - request_senders, - response_consumer_index, - local_sender, - socket.clone(), - pending_scrape_responses, - ))) - .detach(); - - for (_, receiver) in response_receivers.streams().into_iter() { - spawn_local(enclose!((pending_scrape_responses) handle_shared_responses( - socket.clone(), - 
pending_scrape_responses, - receiver, - ))) - .detach(); - } - - send_local_responses(socket, local_receiver.stream()).await; -} - -async fn read_requests( - config: Config, - state: State, - request_senders: Senders<(usize, ConnectedRequest, SocketAddr)>, - response_consumer_index: usize, - local_sender: LocalSender<(Response, SocketAddr)>, - socket: Rc, - pending_scrape_responses: Rc>, -) { - let mut rng = StdRng::from_entropy(); - - let access_list_mode = config.access_list.mode; - - let max_connection_age = config.cleaning.max_connection_age; - let connection_valid_until = Rc::new(RefCell::new(ValidUntil::new(max_connection_age))); - let pending_scrape_valid_until = - Rc::new(RefCell::new(ValidUntil::new(PENDING_SCRAPE_MAX_WAIT))); - let connections = Rc::new(RefCell::new(ConnectionMap::default())); - let mut access_list_cache = create_access_list_cache(&state.access_list); - - // Periodically update connection_valid_until - TimerActionRepeat::repeat(enclose!((connection_valid_until) move || { - enclose!((connection_valid_until) move || async move { - *connection_valid_until.borrow_mut() = ValidUntil::new(max_connection_age); - - Some(Duration::from_secs(1)) - })() - })); - - // Periodically update pending_scrape_valid_until - TimerActionRepeat::repeat(enclose!((pending_scrape_valid_until) move || { - enclose!((pending_scrape_valid_until) move || async move { - *pending_scrape_valid_until.borrow_mut() = ValidUntil::new(PENDING_SCRAPE_MAX_WAIT); - - Some(Duration::from_secs(10)) - })() - })); - - // Periodically clean connections - TimerActionRepeat::repeat(enclose!((config, connections) move || { - enclose!((config, connections) move || async move { - connections.borrow_mut().clean(); - - Some(Duration::from_secs(config.cleaning.connection_cleaning_interval)) - })() - })); - - let mut buf = [0u8; MAX_PACKET_SIZE]; - - loop { - match socket.recv_from(&mut buf).await { - Ok((amt, src)) => { - let request = Request::from_bytes(&buf[..amt], 
config.protocol.max_scrape_torrents); - - ::log::debug!("read request: {:?}", request); - - match request { - Ok(Request::Connect(request)) => { - let connection_id = ConnectionId(rng.gen()); - - connections.borrow_mut().insert( - connection_id, - src, - connection_valid_until.borrow().to_owned(), - ); - - let response = Response::Connect(ConnectResponse { - connection_id, - transaction_id: request.transaction_id, - }); - - local_sender.try_send((response, src)).unwrap(); - } - Ok(Request::Announce(request)) => { - if connections.borrow().contains(request.connection_id, src) { - if access_list_cache - .load() - .allows(access_list_mode, &request.info_hash.0) - { - let request_consumer_index = - calculate_request_consumer_index(&config, request.info_hash); - - if let Err(err) = request_senders - .send_to( - request_consumer_index, - ( - response_consumer_index, - ConnectedRequest::Announce(request), - src, - ), - ) - .await - { - ::log::error!("request_sender.try_send failed: {:?}", err) - } - } else { - let response = Response::Error(ErrorResponse { - transaction_id: request.transaction_id, - message: "Info hash not allowed".into(), - }); - - local_sender.try_send((response, src)).unwrap(); - } - } - } - Ok(Request::Scrape(ScrapeRequest { - transaction_id, - connection_id, - info_hashes, - })) => { - if connections.borrow().contains(connection_id, src) { - let mut consumer_requests: AHashIndexMap< - usize, - (ScrapeRequest, Vec), - > = Default::default(); - - for (i, info_hash) in info_hashes.into_iter().enumerate() { - let (req, indices) = consumer_requests - .entry(calculate_request_consumer_index(&config, info_hash)) - .or_insert_with(|| { - let request = ScrapeRequest { - transaction_id: transaction_id, - connection_id: connection_id, - info_hashes: Vec::new(), - }; - - (request, Vec::new()) - }); - - req.info_hashes.push(info_hash); - indices.push(i); - } - - pending_scrape_responses.borrow_mut().prepare( - transaction_id, - consumer_requests.len(), - 
pending_scrape_valid_until.borrow().to_owned(), - ); - - for (consumer_index, (request, original_indices)) in consumer_requests { - let request = ConnectedRequest::Scrape { - request, - original_indices, - }; - - if let Err(err) = request_senders - .send_to( - consumer_index, - (response_consumer_index, request, src), - ) - .await - { - ::log::error!("request_sender.send failed: {:?}", err) - } - } - } - } - Err(err) => { - ::log::debug!("Request::from_bytes error: {:?}", err); - - if let RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } = err - { - if connections.borrow().contains(connection_id, src) { - let response = ErrorResponse { - transaction_id, - message: err.right_or("Parse error").into(), - }; - - local_sender.try_send((response.into(), src)).unwrap(); - } - } - } - } - } - Err(err) => { - ::log::error!("recv_from: {:?}", err); - } - } - - yield_if_needed().await; - } -} - -async fn handle_shared_responses( - socket: Rc, - pending_scrape_responses: Rc>, - mut stream: S, -) where - S: Stream + ::std::marker::Unpin, -{ - let mut buf = [0u8; MAX_PACKET_SIZE]; - let mut buf = Cursor::new(&mut buf[..]); - - while let Some((response, addr)) = stream.next().await { - let opt_response = match response { - ConnectedResponse::Announce(response) => Some((Response::Announce(response), addr)), - ConnectedResponse::Scrape { - response, - original_indices, - } => pending_scrape_responses - .borrow_mut() - .add_and_get_finished(response, original_indices) - .map(|response| (Response::Scrape(response), addr)), - }; - - if let Some((response, addr)) = opt_response { - write_response_to_socket(&socket, &mut buf, addr, response).await; - } - - yield_if_needed().await; - } -} - -async fn send_local_responses(socket: Rc, mut stream: S) -where - S: Stream + ::std::marker::Unpin, -{ - let mut buf = [0u8; MAX_PACKET_SIZE]; - let mut buf = Cursor::new(&mut buf[..]); - - while let Some((response, addr)) = stream.next().await { - 
write_response_to_socket(&socket, &mut buf, addr, response).await; - - yield_if_needed().await; - } -} - -async fn write_response_to_socket( - socket: &Rc, - buf: &mut Cursor<&mut [u8]>, - addr: SocketAddr, - response: Response, -) { - buf.set_position(0); - - ::log::debug!("preparing to send response: {:?}", response.clone()); - - response - .write(buf, ip_version_from_ip(addr.ip())) - .expect("write response"); - - let position = buf.position() as usize; - - if let Err(err) = socket.send_to(&buf.get_ref()[..position], addr).await { - ::log::info!("send_to failed: {:?}", err); - } -} - -fn calculate_request_consumer_index(config: &Config, info_hash: InfoHash) -> usize { - (info_hash.0[0] as usize) % config.request_workers -} - -fn ip_version_from_ip(ip: IpAddr) -> IpVersion { - match ip { - IpAddr::V4(_) => IpVersion::IPv4, - IpAddr::V6(ip) => { - if let [0, 0, 0, 0, 0, 0xffff, ..] = ip.segments() { - IpVersion::IPv4 - } else { - IpVersion::IPv6 - } - } - } -} diff --git a/aquatic_udp/src/lib/common/handlers.rs b/aquatic_udp/src/lib/handlers.rs similarity index 68% rename from aquatic_udp/src/lib/common/handlers.rs rename to aquatic_udp/src/lib/handlers.rs index d14b630..c77ce89 100644 --- a/aquatic_udp/src/lib/common/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -1,37 +1,101 @@ +use std::net::IpAddr; use std::net::SocketAddr; +use std::time::Duration; -use rand::rngs::SmallRng; +use aquatic_common::ValidUntil; +use crossbeam_channel::{Receiver, Sender}; +use rand::{rngs::SmallRng, SeedableRng}; use aquatic_common::convert_ipv4_mapped_ipv6; use aquatic_common::extract_response_peers; +use aquatic_udp_protocol::*; + use crate::common::*; +use crate::config::Config; -#[derive(Debug)] -pub enum ConnectedRequest { - Announce(AnnounceRequest), - Scrape { - request: ScrapeRequest, - /// Currently only used by glommio implementation - original_indices: Vec, - }, -} +pub fn run_request_worker( + state: State, + config: Config, + request_receiver: 
Receiver<(ConnectedRequest, SocketAddr)>, + response_sender: Sender<(ConnectedResponse, SocketAddr)>, +) { + let mut announce_requests: Vec<(AnnounceRequest, SocketAddr)> = Vec::new(); + let mut scrape_requests: Vec<(ScrapeRequest, SocketAddr)> = Vec::new(); + let mut responses: Vec<(ConnectedResponse, SocketAddr)> = Vec::new(); -#[derive(Debug)] -pub enum ConnectedResponse { - Announce(AnnounceResponse), - Scrape { - response: ScrapeResponse, - /// Currently only used by glommio implementation - original_indices: Vec, - }, -} + let mut small_rng = SmallRng::from_entropy(); -impl Into for ConnectedResponse { - fn into(self) -> Response { - match self { - Self::Announce(response) => Response::Announce(response), - Self::Scrape { response, .. } => Response::Scrape(response), + let timeout = Duration::from_micros(config.handlers.channel_recv_timeout_microseconds); + + loop { + let mut opt_torrents = None; + + // Collect requests from channel, divide them by type + // + // Collect a maximum number of request. Stop collecting before that + // number is reached if having waited for too long for a request, but + // only if TorrentMaps mutex isn't locked. + for i in 0..config.handlers.max_requests_per_iter { + let (request, src): (ConnectedRequest, SocketAddr) = if i == 0 { + match request_receiver.recv() { + Ok(r) => r, + Err(_) => break, // Really shouldn't happen + } + } else { + match request_receiver.recv_timeout(timeout) { + Ok(r) => r, + Err(_) => { + if let Some(guard) = state.torrents.try_lock() { + opt_torrents = Some(guard); + + break; + } else { + continue; + } + } + } + }; + + match request { + ConnectedRequest::Announce(request) => announce_requests.push((request, src)), + ConnectedRequest::Scrape { request, .. } => scrape_requests.push((request, src)), + } + } + + // Generate responses for announce and scrape requests, then drop MutexGuard. 
+ { + let mut torrents = opt_torrents.unwrap_or_else(|| state.torrents.lock()); + + let peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age); + + responses.extend(announce_requests.drain(..).map(|(request, src)| { + let response = handle_announce_request( + &config, + &mut small_rng, + &mut torrents, + request, + src, + peer_valid_until, + ); + + (ConnectedResponse::Announce(response), src) + })); + + responses.extend(scrape_requests.drain(..).map(|(request, src)| { + let response = ConnectedResponse::Scrape { + response: handle_scrape_request(&mut torrents, src, request), + original_indices: Vec::new(), + }; + + (response, src) + })); + } + + for r in responses.drain(..) { + if let Err(err) = response_sender.send(r) { + ::log::error!("error sending response to channel: {}", err); + } } } } @@ -143,7 +207,6 @@ fn calc_max_num_peers_to_take(config: &Config, peers_wanted: i32) -> usize { } } -#[inline] pub fn handle_scrape_request( torrents: &mut TorrentMaps, src: SocketAddr, diff --git a/aquatic_udp/src/lib/lib.rs b/aquatic_udp/src/lib/lib.rs index cc4403d..a08b862 100644 --- a/aquatic_udp/src/lib/lib.rs +++ b/aquatic_udp/src/lib/lib.rs @@ -1,22 +1,175 @@ -use cfg_if::cfg_if; - pub mod common; pub mod config; -#[cfg(all(feature = "with-glommio", target_os = "linux"))] -pub mod glommio; -#[cfg(any(feature = "with-mio", feature = "with-io-uring"))] -pub mod other; +pub mod handlers; +#[cfg(feature = "with-mio")] +pub mod network_mio; +#[cfg(feature = "with-io-uring")] +pub mod network_uring; +pub mod tasks; use config::Config; +use std::sync::{atomic::AtomicUsize, Arc}; +use std::thread::Builder; +use std::time::Duration; + +use anyhow::Context; +#[cfg(feature = "cpu-pinning")] +use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex}; +use aquatic_common::privileges::drop_privileges_after_socket_binding; +use crossbeam_channel::unbounded; + +use aquatic_common::access_list::update_access_list; +use signal_hook::consts::SIGUSR1; +use 
signal_hook::iterator::Signals; + +use common::State; + pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker"; pub fn run(config: Config) -> ::anyhow::Result<()> { - cfg_if! { - if #[cfg(all(feature = "with-glommio", target_os = "linux"))] { - glommio::run(config) - } else { - other::run(config) + let state = State::default(); + + update_access_list(&config.access_list, &state.access_list)?; + + let mut signals = Signals::new(::std::iter::once(SIGUSR1))?; + + { + let config = config.clone(); + let state = state.clone(); + + ::std::thread::spawn(move || run_inner(config, state)); + } + + #[cfg(feature = "cpu-pinning")] + pin_current_if_configured_to( + &config.cpu_pinning, + config.socket_workers, + WorkerIndex::Other, + ); + + for signal in &mut signals { + match signal { + SIGUSR1 => { + let _ = update_access_list(&config.access_list, &state.access_list); + } + _ => unreachable!(), } } + + Ok(()) +} + +pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { + let num_bound_sockets = Arc::new(AtomicUsize::new(0)); + + let (request_sender, request_receiver) = unbounded(); + let (response_sender, response_receiver) = unbounded(); + + for i in 0..config.request_workers { + let state = state.clone(); + let config = config.clone(); + let request_receiver = request_receiver.clone(); + let response_sender = response_sender.clone(); + + Builder::new() + .name(format!("request-{:02}", i + 1)) + .spawn(move || { + #[cfg(feature = "cpu-pinning")] + pin_current_if_configured_to( + &config.cpu_pinning, + config.socket_workers, + WorkerIndex::RequestWorker(i), + ); + + handlers::run_request_worker(state, config, request_receiver, response_sender) + }) + .with_context(|| "spawn request worker")?; + } + + for i in 0..config.socket_workers { + let state = state.clone(); + let config = config.clone(); + let request_sender = request_sender.clone(); + let response_receiver = response_receiver.clone(); + let num_bound_sockets = num_bound_sockets.clone(); + + 
Builder::new() + .name(format!("socket-{:02}", i + 1)) + .spawn(move || { + #[cfg(feature = "cpu-pinning")] + pin_current_if_configured_to( + &config.cpu_pinning, + config.socket_workers, + WorkerIndex::SocketWorker(i), + ); + + cfg_if::cfg_if!( + if #[cfg(feature = "with-io-uring")] { + network_uring::run_socket_worker( + state, + config, + request_sender, + response_receiver, + num_bound_sockets, + ); + } else { + network_mio::run_socket_worker( + state, + config, + i, + request_sender, + response_receiver, + num_bound_sockets, + ); + } + ); + }) + .with_context(|| "spawn socket worker")?; + } + + if config.statistics.interval != 0 { + let state = state.clone(); + let config = config.clone(); + + Builder::new() + .name("statistics-collector".to_string()) + .spawn(move || { + #[cfg(feature = "cpu-pinning")] + pin_current_if_configured_to( + &config.cpu_pinning, + config.socket_workers, + WorkerIndex::Other, + ); + + loop { + ::std::thread::sleep(Duration::from_secs(config.statistics.interval)); + + tasks::gather_and_print_statistics(&state, &config); + } + }) + .with_context(|| "spawn statistics worker")?; + } + + drop_privileges_after_socket_binding( + &config.privileges, + num_bound_sockets, + config.socket_workers, + ) + .unwrap(); + + #[cfg(feature = "cpu-pinning")] + pin_current_if_configured_to( + &config.cpu_pinning, + config.socket_workers, + WorkerIndex::Other, + ); + + loop { + ::std::thread::sleep(Duration::from_secs( + config.cleaning.torrent_cleaning_interval, + )); + + state.torrents.lock().clean(&config, &state.access_list); + } } diff --git a/aquatic_udp/src/lib/other/network_mio.rs b/aquatic_udp/src/lib/network_mio.rs similarity index 98% rename from aquatic_udp/src/lib/other/network_mio.rs rename to aquatic_udp/src/lib/network_mio.rs index d04fb2a..164aeb1 100644 --- a/aquatic_udp/src/lib/other/network_mio.rs +++ b/aquatic_udp/src/lib/network_mio.rs @@ -8,6 +8,7 @@ use std::time::{Duration, Instant}; use std::vec::Drain; use 
aquatic_common::access_list::create_access_list_cache; +use aquatic_common::ValidUntil; use crossbeam_channel::{Receiver, Sender}; use mio::net::UdpSocket; use mio::{Events, Interest, Poll, Token}; @@ -15,13 +16,10 @@ use rand::prelude::{SeedableRng, StdRng}; use aquatic_udp_protocol::{Request, Response}; -use crate::common::handlers::*; -use crate::common::network::ConnectionMap; +use crate::common::network::*; use crate::common::*; use crate::config::Config; -use super::common::*; - pub fn run_socket_worker( state: State, config: Config, diff --git a/aquatic_udp/src/lib/other/network_uring.rs b/aquatic_udp/src/lib/network_uring.rs similarity index 99% rename from aquatic_udp/src/lib/other/network_uring.rs rename to aquatic_udp/src/lib/network_uring.rs index 184e092..dc2722e 100644 --- a/aquatic_udp/src/lib/other/network_uring.rs +++ b/aquatic_udp/src/lib/network_uring.rs @@ -10,6 +10,7 @@ use std::sync::{ use std::time::{Duration, Instant}; use aquatic_common::access_list::create_access_list_cache; +use aquatic_common::ValidUntil; use crossbeam_channel::{Receiver, Sender}; use io_uring::types::{Fixed, Timespec}; use io_uring::SubmissionQueue; @@ -21,13 +22,11 @@ use slab::Slab; use aquatic_udp_protocol::{Request, Response}; -use crate::common::handlers::*; use crate::common::network::ConnectionMap; +use crate::common::network::*; use crate::common::*; use crate::config::Config; -use super::common::*; - const RING_SIZE: usize = 128; const MAX_RECV_EVENTS: usize = 1; const MAX_SEND_EVENTS: usize = RING_SIZE - MAX_RECV_EVENTS - 1; diff --git a/aquatic_udp/src/lib/other/common.rs b/aquatic_udp/src/lib/other/common.rs deleted file mode 100644 index a1f62f8..0000000 --- a/aquatic_udp/src/lib/other/common.rs +++ /dev/null @@ -1,166 +0,0 @@ -use aquatic_common::access_list::{AccessListArcSwap, AccessListCache}; -use aquatic_udp_protocol::*; -use crossbeam_channel::Sender; -use parking_lot::Mutex; -use rand::{prelude::StdRng, Rng}; -use socket2::{Domain, Protocol, Socket, 
Type}; -use std::{ - net::{IpAddr, SocketAddr}, - sync::{atomic::AtomicUsize, Arc}, -}; - -use crate::common::*; -use crate::common::{handlers::ConnectedRequest, network::ConnectionMap}; -use crate::config::Config; - -#[derive(Default)] -pub struct Statistics { - pub requests_received: AtomicUsize, - pub responses_sent: AtomicUsize, - pub bytes_received: AtomicUsize, - pub bytes_sent: AtomicUsize, -} - -#[derive(Clone)] -pub struct State { - pub access_list: Arc, - pub torrents: Arc>, - pub statistics: Arc, -} - -impl Default for State { - fn default() -> Self { - Self { - access_list: Arc::new(AccessListArcSwap::default()), - torrents: Arc::new(Mutex::new(TorrentMaps::default())), - statistics: Arc::new(Statistics::default()), - } - } -} - -pub fn handle_request( - config: &Config, - connections: &mut ConnectionMap, - access_list_cache: &mut AccessListCache, - rng: &mut StdRng, - request_sender: &Sender<(ConnectedRequest, SocketAddr)>, - local_responses: &mut Vec<(Response, SocketAddr)>, - valid_until: ValidUntil, - res_request: Result, - src: SocketAddr, -) { - let access_list_mode = config.access_list.mode; - - match res_request { - Ok(Request::Connect(request)) => { - let connection_id = ConnectionId(rng.gen()); - - connections.insert(connection_id, src, valid_until); - - let response = Response::Connect(ConnectResponse { - connection_id, - transaction_id: request.transaction_id, - }); - - local_responses.push((response, src)) - } - Ok(Request::Announce(request)) => { - if connections.contains(request.connection_id, src) { - if access_list_cache - .load() - .allows(access_list_mode, &request.info_hash.0) - { - if let Err(err) = - request_sender.try_send((ConnectedRequest::Announce(request), src)) - { - ::log::warn!("request_sender.try_send failed: {:?}", err) - } - } else { - let response = Response::Error(ErrorResponse { - transaction_id: request.transaction_id, - message: "Info hash not allowed".into(), - }); - - local_responses.push((response, src)) - } - } 
- } - Ok(Request::Scrape(request)) => { - if connections.contains(request.connection_id, src) { - let request = ConnectedRequest::Scrape { - request, - original_indices: Vec::new(), - }; - - if let Err(err) = request_sender.try_send((request, src)) { - ::log::warn!("request_sender.try_send failed: {:?}", err) - } - } - } - Err(err) => { - ::log::debug!("Request::from_bytes error: {:?}", err); - - if let RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } = err - { - if connections.contains(connection_id, src) { - let response = ErrorResponse { - transaction_id, - message: err.right_or("Parse error").into(), - }; - - local_responses.push((response.into(), src)); - } - } - } - } -} - -pub fn ip_version_from_ip(ip: IpAddr) -> IpVersion { - match ip { - IpAddr::V4(_) => IpVersion::IPv4, - IpAddr::V6(ip) => { - if let [0, 0, 0, 0, 0, 0xffff, ..] = ip.segments() { - IpVersion::IPv4 - } else { - IpVersion::IPv6 - } - } - } -} - -pub fn create_socket(config: &Config) -> ::std::net::UdpSocket { - let socket = if config.network.address.is_ipv4() { - Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) - } else { - Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) - } - .expect("create socket"); - - socket.set_reuse_port(true).expect("socket: set reuse port"); - - socket - .set_nonblocking(true) - .expect("socket: set nonblocking"); - - socket - .bind(&config.network.address.into()) - .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); - - let recv_buffer_size = config.network.socket_recv_buffer_size; - - if recv_buffer_size != 0 { - if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { - ::log::error!( - "socket: failed setting recv buffer to {}: {:?}", - recv_buffer_size, - err - ); - } - } - - socket.into() -} diff --git a/aquatic_udp/src/lib/other/handlers.rs b/aquatic_udp/src/lib/other/handlers.rs deleted file mode 100644 index 0c2c5f2..0000000 --- 
a/aquatic_udp/src/lib/other/handlers.rs +++ /dev/null @@ -1,98 +0,0 @@ -use std::net::SocketAddr; -use std::time::Duration; - -use aquatic_common::ValidUntil; -use crossbeam_channel::{Receiver, Sender}; -use rand::{rngs::SmallRng, SeedableRng}; - -use aquatic_udp_protocol::*; - -use crate::common::handlers::*; -use crate::config::Config; -use crate::other::common::*; - -pub fn run_request_worker( - state: State, - config: Config, - request_receiver: Receiver<(ConnectedRequest, SocketAddr)>, - response_sender: Sender<(ConnectedResponse, SocketAddr)>, -) { - let mut announce_requests: Vec<(AnnounceRequest, SocketAddr)> = Vec::new(); - let mut scrape_requests: Vec<(ScrapeRequest, SocketAddr)> = Vec::new(); - let mut responses: Vec<(ConnectedResponse, SocketAddr)> = Vec::new(); - - let mut small_rng = SmallRng::from_entropy(); - - let timeout = Duration::from_micros(config.handlers.channel_recv_timeout_microseconds); - - loop { - let mut opt_torrents = None; - - // Collect requests from channel, divide them by type - // - // Collect a maximum number of request. Stop collecting before that - // number is reached if having waited for too long for a request, but - // only if TorrentMaps mutex isn't locked. - for i in 0..config.handlers.max_requests_per_iter { - let (request, src): (ConnectedRequest, SocketAddr) = if i == 0 { - match request_receiver.recv() { - Ok(r) => r, - Err(_) => break, // Really shouldn't happen - } - } else { - match request_receiver.recv_timeout(timeout) { - Ok(r) => r, - Err(_) => { - if let Some(guard) = state.torrents.try_lock() { - opt_torrents = Some(guard); - - break; - } else { - continue; - } - } - } - }; - - match request { - ConnectedRequest::Announce(request) => announce_requests.push((request, src)), - ConnectedRequest::Scrape { request, .. } => scrape_requests.push((request, src)), - } - } - - // Generate responses for announce and scrape requests, then drop MutexGuard. 
- { - let mut torrents = opt_torrents.unwrap_or_else(|| state.torrents.lock()); - - let peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age); - - responses.extend(announce_requests.drain(..).map(|(request, src)| { - let response = handle_announce_request( - &config, - &mut small_rng, - &mut torrents, - request, - src, - peer_valid_until, - ); - - (ConnectedResponse::Announce(response), src) - })); - - responses.extend(scrape_requests.drain(..).map(|(request, src)| { - let response = ConnectedResponse::Scrape { - response: handle_scrape_request(&mut torrents, src, request), - original_indices: Vec::new(), - }; - - (response, src) - })); - } - - for r in responses.drain(..) { - if let Err(err) = response_sender.send(r) { - ::log::error!("error sending response to channel: {}", err); - } - } - } -} diff --git a/aquatic_udp/src/lib/other/mod.rs b/aquatic_udp/src/lib/other/mod.rs deleted file mode 100644 index 19e7c31..0000000 --- a/aquatic_udp/src/lib/other/mod.rs +++ /dev/null @@ -1,172 +0,0 @@ -use std::sync::{atomic::AtomicUsize, Arc}; -use std::thread::Builder; -use std::time::Duration; - -use anyhow::Context; -#[cfg(feature = "cpu-pinning")] -use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex}; -use aquatic_common::privileges::drop_privileges_after_socket_binding; -use crossbeam_channel::unbounded; - -use aquatic_common::access_list::update_access_list; -use signal_hook::consts::SIGUSR1; -use signal_hook::iterator::Signals; - -use crate::config::Config; - -pub mod common; -pub mod handlers; -#[cfg(feature = "with-mio")] -pub mod network_mio; -#[cfg(feature = "with-io-uring")] -pub mod network_uring; -pub mod tasks; - -use common::State; - -pub fn run(config: Config) -> ::anyhow::Result<()> { - let state = State::default(); - - update_access_list(&config.access_list, &state.access_list)?; - - let mut signals = Signals::new(::std::iter::once(SIGUSR1))?; - - { - let config = config.clone(); - let state = state.clone(); - - 
::std::thread::spawn(move || run_inner(config, state)); - } - - #[cfg(feature = "cpu-pinning")] - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::Other, - ); - - for signal in &mut signals { - match signal { - SIGUSR1 => { - let _ = update_access_list(&config.access_list, &state.access_list); - } - _ => unreachable!(), - } - } - - Ok(()) -} - -pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { - let num_bound_sockets = Arc::new(AtomicUsize::new(0)); - - let (request_sender, request_receiver) = unbounded(); - let (response_sender, response_receiver) = unbounded(); - - for i in 0..config.request_workers { - let state = state.clone(); - let config = config.clone(); - let request_receiver = request_receiver.clone(); - let response_sender = response_sender.clone(); - - Builder::new() - .name(format!("request-{:02}", i + 1)) - .spawn(move || { - #[cfg(feature = "cpu-pinning")] - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::RequestWorker(i), - ); - - handlers::run_request_worker(state, config, request_receiver, response_sender) - }) - .with_context(|| "spawn request worker")?; - } - - for i in 0..config.socket_workers { - let state = state.clone(); - let config = config.clone(); - let request_sender = request_sender.clone(); - let response_receiver = response_receiver.clone(); - let num_bound_sockets = num_bound_sockets.clone(); - - Builder::new() - .name(format!("socket-{:02}", i + 1)) - .spawn(move || { - #[cfg(feature = "cpu-pinning")] - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::SocketWorker(i), - ); - - cfg_if::cfg_if!( - if #[cfg(feature = "with-io-uring")] { - network_uring::run_socket_worker( - state, - config, - request_sender, - response_receiver, - num_bound_sockets, - ); - } else { - network_mio::run_socket_worker( - state, - config, - i, - request_sender, - response_receiver, - num_bound_sockets, 
- ); - } - ); - }) - .with_context(|| "spawn socket worker")?; - } - - if config.statistics.interval != 0 { - let state = state.clone(); - let config = config.clone(); - - Builder::new() - .name("statistics-collector".to_string()) - .spawn(move || { - #[cfg(feature = "cpu-pinning")] - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::Other, - ); - - loop { - ::std::thread::sleep(Duration::from_secs(config.statistics.interval)); - - tasks::gather_and_print_statistics(&state, &config); - } - }) - .with_context(|| "spawn statistics worker")?; - } - - drop_privileges_after_socket_binding( - &config.privileges, - num_bound_sockets, - config.socket_workers, - ) - .unwrap(); - - #[cfg(feature = "cpu-pinning")] - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::Other, - ); - - loop { - ::std::thread::sleep(Duration::from_secs( - config.cleaning.torrent_cleaning_interval, - )); - - state.torrents.lock().clean(&config, &state.access_list); - } -} diff --git a/aquatic_udp/src/lib/other/tasks.rs b/aquatic_udp/src/lib/tasks.rs similarity index 100% rename from aquatic_udp/src/lib/other/tasks.rs rename to aquatic_udp/src/lib/tasks.rs diff --git a/aquatic_udp_bench/Cargo.toml b/aquatic_udp_bench/Cargo.toml index a645449..e2950df 100644 --- a/aquatic_udp_bench/Cargo.toml +++ b/aquatic_udp_bench/Cargo.toml @@ -13,6 +13,7 @@ name = "aquatic_udp_bench" anyhow = "1" aquatic_cli_helpers = "0.1.0" aquatic_udp = "0.1.0" +aquatic_udp_protocol = "0.1.0" crossbeam-channel = "0.5" indicatif = "0.16.2" mimalloc = { version = "0.1", default-features = false } diff --git a/aquatic_udp_bench/src/announce.rs b/aquatic_udp_bench/src/announce.rs index 5eac23d..bcc84e8 100644 --- a/aquatic_udp_bench/src/announce.rs +++ b/aquatic_udp_bench/src/announce.rs @@ -6,9 +6,9 @@ use indicatif::ProgressIterator; use rand::Rng; use rand_distr::Pareto; -use aquatic_udp::common::handlers::*; use aquatic_udp::common::*; use 
aquatic_udp::config::Config; +use aquatic_udp_protocol::*; use crate::common::*; use crate::config::BenchConfig; diff --git a/aquatic_udp_bench/src/main.rs b/aquatic_udp_bench/src/main.rs index 6d294e2..fb2895f 100644 --- a/aquatic_udp_bench/src/main.rs +++ b/aquatic_udp_bench/src/main.rs @@ -7,6 +7,7 @@ //! Scrape: 1 873 545 requests/second, 533.75 ns/request //! ``` +use aquatic_udp::handlers::run_request_worker; use crossbeam_channel::unbounded; use num_format::{Locale, ToFormattedString}; use rand::{rngs::SmallRng, thread_rng, Rng, SeedableRng}; @@ -15,8 +16,7 @@ use std::time::Duration; use aquatic_cli_helpers::run_app_with_cli_and_config; use aquatic_udp::common::*; use aquatic_udp::config::Config; -use aquatic_udp::other::common::*; -use aquatic_udp::other::handlers; +use aquatic_udp_protocol::*; use config::BenchConfig; @@ -52,7 +52,7 @@ pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> { let response_sender = response_sender.clone(); ::std::thread::spawn(move || { - handlers::run_request_worker(state, config, request_receiver, response_sender) + run_request_worker(state, config, request_receiver, response_sender) }); } diff --git a/aquatic_udp_bench/src/scrape.rs b/aquatic_udp_bench/src/scrape.rs index f718753..39d6ade 100644 --- a/aquatic_udp_bench/src/scrape.rs +++ b/aquatic_udp_bench/src/scrape.rs @@ -6,9 +6,9 @@ use indicatif::ProgressIterator; use rand::Rng; use rand_distr::Pareto; -use aquatic_udp::common::handlers::*; use aquatic_udp::common::*; use aquatic_udp::config::Config; +use aquatic_udp_protocol::*; use crate::common::*; use crate::config::BenchConfig; diff --git a/scripts/run-aquatic-udp.sh b/scripts/run-aquatic-udp.sh index 0af7880..fe99a35 100755 --- a/scripts/run-aquatic-udp.sh +++ b/scripts/run-aquatic-udp.sh @@ -2,14 +2,16 @@ . 
./scripts/env-native-cpu-without-avx-512 +USAGE="Usage: $0 [mio|io-uring] [ARGS]" + if [ "$1" != "mio" ] && [ "$1" != "glommio" ] && [ "$1" != "io-uring" ]; then - echo "Usage: $0 [mio|glommio|io-uring] [ARGS]" + echo "$USAGE" else if [ "$1" = "mio" ]; then cargo run --release --bin aquatic_udp -- "${@:2}" elif [ "$1" = "io-uring" ]; then cargo run --release --features "with-io-uring" --no-default-features --bin aquatic_udp -- "${@:2}" else - cargo run --release --features "with-glommio" --no-default-features --bin aquatic_udp -- "${@:2}" + echo "$USAGE" fi fi From 4b07e007f33733a9e46c077a0da1b7a6f9e54650 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Sun, 14 Nov 2021 22:07:58 +0100 Subject: [PATCH 12/56] udp: add network.only_ipv6 config flag --- TODO.md | 2 -- aquatic_udp/src/lib/common/network.rs | 4 ++++ aquatic_udp/src/lib/config.rs | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/TODO.md b/TODO.md index 50b9bca..986d9a6 100644 --- a/TODO.md +++ b/TODO.md @@ -20,12 +20,10 @@ * uring * ValidUntil periodic update * statistics - * ipv6_only * shared config keys such as poll interval * mio * stagger connection cleaning intervals? 
* ipv4-mapped addresses - * ipv6_only * glommio * consider sending local responses immediately * consider adding ConnectedScrapeRequest::Scrape(PendingScrapeRequest) diff --git a/aquatic_udp/src/lib/common/network.rs b/aquatic_udp/src/lib/common/network.rs index ca3edaf..053a7da 100644 --- a/aquatic_udp/src/lib/common/network.rs +++ b/aquatic_udp/src/lib/common/network.rs @@ -123,6 +123,10 @@ pub fn create_socket(config: &Config) -> ::std::net::UdpSocket { } .expect("create socket"); + if config.network.only_ipv6 { + socket.set_only_v6(true).expect("socket: set only ipv6"); + } + socket.set_reuse_port(true).expect("socket: set reuse port"); socket diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index f6c0247..8f2b8a2 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -38,6 +38,7 @@ impl aquatic_cli_helpers::Config for Config { pub struct NetworkConfig { /// Bind to this address pub address: SocketAddr, + pub only_ipv6: bool, /// Size of socket recv buffer. Use 0 for OS default. /// /// This setting can have a big impact on dropped packages. 
It might @@ -120,6 +121,7 @@ impl Default for NetworkConfig { fn default() -> Self { Self { address: SocketAddr::from(([0, 0, 0, 0], 3000)), + only_ipv6: false, socket_recv_buffer_size: 4096 * 128, #[cfg(feature = "with-mio")] poll_event_capacity: 4096, From f3e41148fe53df1015674ca77944b8d27ddc5663 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Mon, 15 Nov 2021 21:32:20 +0100 Subject: [PATCH 13/56] udp: split AnnounceResponse into V4 and V6 versions --- aquatic_udp/src/lib/common/mod.rs | 128 +++++++++++++++++-------- aquatic_udp/src/lib/handlers.rs | 16 ++-- aquatic_udp/src/lib/network_mio.rs | 41 +++++++- aquatic_udp/src/lib/network_uring.rs | 6 +- aquatic_udp_bench/src/announce.rs | 4 +- aquatic_udp_load_test/src/handler.rs | 9 +- aquatic_udp_load_test/src/network.rs | 6 +- aquatic_udp_protocol/src/common.rs | 28 ++++-- aquatic_udp_protocol/src/response.rs | 136 ++++++++++++++++----------- 9 files changed, 255 insertions(+), 119 deletions(-) diff --git a/aquatic_udp/src/lib/common/mod.rs b/aquatic_udp/src/lib/common/mod.rs index 8a29d21..41c52e8 100644 --- a/aquatic_udp/src/lib/common/mod.rs +++ b/aquatic_udp/src/lib/common/mod.rs @@ -18,35 +18,6 @@ pub mod network; pub const MAX_PACKET_SIZE: usize = 8192; -#[derive(Debug)] -pub enum ConnectedRequest { - Announce(AnnounceRequest), - Scrape { - request: ScrapeRequest, - /// Currently only used by glommio implementation - original_indices: Vec, - }, -} - -#[derive(Debug)] -pub enum ConnectedResponse { - Announce(AnnounceResponse), - Scrape { - response: ScrapeResponse, - /// Currently only used by glommio implementation - original_indices: Vec, - }, -} - -impl Into for ConnectedResponse { - fn into(self) -> Response { - match self { - Self::Announce(response) => Response::Announce(response), - Self::Scrape { response, .. 
} => Response::Scrape(response), - } - } -} - pub trait Ip: Hash + PartialEq + Eq + Clone + Copy { fn ip_addr(self) -> IpAddr; } @@ -63,6 +34,89 @@ impl Ip for Ipv6Addr { } } +#[derive(Debug)] +pub enum ConnectedRequest { + Announce(AnnounceRequest), + Scrape { + request: ScrapeRequest, + /// Currently only used by glommio implementation + original_indices: Vec, + }, +} + +#[derive(Debug)] +pub enum ConnectedResponse { + AnnounceIpv4(AnnounceResponseIpv4), + AnnounceIpv6(AnnounceResponseIpv6), + Scrape { + response: ScrapeResponse, + /// Currently only used by glommio implementation + original_indices: Vec, + }, +} + +impl Into for ConnectedResponse { + fn into(self) -> Response { + match self { + Self::AnnounceIpv4(response) => Response::AnnounceIpv4(response), + Self::AnnounceIpv6(response) => Response::AnnounceIpv6(response), + Self::Scrape { response, .. } => Response::Scrape(response), + } + } +} + +#[derive(Clone, PartialEq, Debug)] +pub struct ProtocolResponsePeer { + pub ip_address: I, + pub port: Port, +} + +pub struct ProtocolAnnounceResponse { + pub transaction_id: TransactionId, + pub announce_interval: AnnounceInterval, + pub leechers: NumberOfPeers, + pub seeders: NumberOfPeers, + pub peers: Vec>, +} + +impl Into for ProtocolAnnounceResponse { + fn into(self) -> ConnectedResponse { + ConnectedResponse::AnnounceIpv4(AnnounceResponseIpv4 { + transaction_id: self.transaction_id, + announce_interval: self.announce_interval, + leechers: self.leechers, + seeders: self.seeders, + peers: self + .peers + .into_iter() + .map(|peer| ResponsePeerIpv4 { + ip_address: peer.ip_address, + port: peer.port, + }) + .collect(), + }) + } +} + +impl Into for ProtocolAnnounceResponse { + fn into(self) -> ConnectedResponse { + ConnectedResponse::AnnounceIpv6(AnnounceResponseIpv6 { + transaction_id: self.transaction_id, + announce_interval: self.announce_interval, + leechers: self.leechers, + seeders: self.seeders, + peers: self + .peers + .into_iter() + .map(|peer| 
ResponsePeerIpv6 { + ip_address: peer.ip_address, + port: peer.port, + }) + .collect(), + }) + } +} + #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub enum PeerStatus { Seeding, @@ -96,9 +150,9 @@ pub struct Peer { impl Peer { #[inline(always)] - pub fn to_response_peer(&self) -> ResponsePeer { - ResponsePeer { - ip_address: self.ip_address.ip_addr(), + pub fn to_response_peer(&self) -> ProtocolResponsePeer { + ProtocolResponsePeer { + ip_address: self.ip_address, port: self.port, } } @@ -230,7 +284,7 @@ pub fn ip_version_from_ip(ip: IpAddr) -> IpVersion { #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv6Addr}; + use std::net::Ipv6Addr; use crate::{common::MAX_PACKET_SIZE, config::Config}; @@ -263,14 +317,14 @@ mod tests { let config = Config::default(); - let peers = ::std::iter::repeat(ResponsePeer { - ip_address: IpAddr::V6(Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1)), + let peers = ::std::iter::repeat(ResponsePeerIpv6 { + ip_address: Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1), port: Port(1), }) .take(config.protocol.max_response_peers) .collect(); - let response = Response::Announce(AnnounceResponse { + let response = Response::AnnounceIpv6(AnnounceResponseIpv6 { transaction_id: TransactionId(1), announce_interval: AnnounceInterval(1), seeders: NumberOfPeers(1), @@ -280,7 +334,7 @@ mod tests { let mut buf = Vec::new(); - response.write(&mut buf, IpVersion::IPv6).unwrap(); + response.write(&mut buf).unwrap(); println!("Buffer len: {}", buf.len()); diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index c77ce89..1650a98 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -79,7 +79,7 @@ pub fn run_request_worker( peer_valid_until, ); - (ConnectedResponse::Announce(response), src) + (response, src) })); responses.extend(scrape_requests.drain(..).map(|(request, src)| { @@ -107,8 +107,8 @@ pub fn handle_announce_request( request: AnnounceRequest, src: SocketAddr, peer_valid_until: ValidUntil, -) -> 
AnnounceResponse { - match convert_ipv4_mapped_ipv6(src.ip()) { +) -> ConnectedResponse { + match src.ip() { IpAddr::V4(ip) => handle_announce_request_inner( config, rng, @@ -116,7 +116,8 @@ pub fn handle_announce_request( request, ip, peer_valid_until, - ), + ) + .into(), IpAddr::V6(ip) => handle_announce_request_inner( config, rng, @@ -124,7 +125,8 @@ pub fn handle_announce_request( request, ip, peer_valid_until, - ), + ) + .into(), } } @@ -135,7 +137,7 @@ fn handle_announce_request_inner( request: AnnounceRequest, peer_ip: I, peer_valid_until: ValidUntil, -) -> AnnounceResponse { +) -> ProtocolAnnounceResponse { let peer_key = PeerMapKey { ip: peer_ip, peer_id: request.peer_id, @@ -186,7 +188,7 @@ fn handle_announce_request_inner( Peer::to_response_peer, ); - AnnounceResponse { + ProtocolAnnounceResponse { transaction_id: request.transaction_id, announce_interval: AnnounceInterval(config.protocol.peer_announce_interval), leechers: NumberOfPeers(torrent_data.num_leechers as i32), diff --git a/aquatic_udp/src/lib/network_mio.rs b/aquatic_udp/src/lib/network_mio.rs index 164aeb1..35769cf 100644 --- a/aquatic_udp/src/lib/network_mio.rs +++ b/aquatic_udp/src/lib/network_mio.rs @@ -1,5 +1,5 @@ use std::io::{Cursor, ErrorKind}; -use std::net::SocketAddr; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, @@ -128,6 +128,22 @@ fn read_requests( requests_received += 1; } + let src = match src { + SocketAddr::V6(src) => { + match src.ip().octets() { + // Convert IPv4-mapped address (available in std but nightly-only) + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { + SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(a, b, c, d), + src.port(), + )) + } + _ => src.into(), + } + } + src => src, + }; + handle_request( config, connections, @@ -182,16 +198,31 @@ fn send_responses( .map(|(response, addr)| (response.into(), addr)), ); - for (response, src) in response_iterator { + for (response, 
addr) in response_iterator { cursor.set_position(0); - let ip_version = ip_version_from_ip(src.ip()); + let addr = if config.network.address.is_ipv4() { + if let SocketAddr::V4(addr) = addr { + SocketAddr::V4(addr) + } else { + unreachable!() + } + } else { + match addr { + SocketAddr::V4(addr) => { + let ip = addr.ip().to_ipv6_mapped(); - match response.write(&mut cursor, ip_version) { + SocketAddr::V6(SocketAddrV6::new(ip, addr.port(), 0, 0)) + } + addr => addr, + } + }; + + match response.write(&mut cursor) { Ok(()) => { let amt = cursor.position() as usize; - match socket.send_to(&cursor.get_ref()[..amt], src) { + match socket.send_to(&cursor.get_ref()[..amt], addr) { Ok(amt) => { responses_sent += 1; bytes_sent += amt; diff --git a/aquatic_udp/src/lib/network_uring.rs b/aquatic_udp/src/lib/network_uring.rs index dc2722e..7d56705 100644 --- a/aquatic_udp/src/lib/network_uring.rs +++ b/aquatic_udp/src/lib/network_uring.rs @@ -215,7 +215,7 @@ pub fn run_socket_worker( let buffer_index = user_data.get_buffer_index(); let buffer_len = result as usize; - let addr = if config.network.address.is_ipv4() { + let src = if config.network.address.is_ipv4() { SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::from(u32::from_be( sockaddrs_ipv4[buffer_index].sin_addr.s_addr, @@ -258,7 +258,7 @@ pub fn run_socket_worker( &mut local_responses, valid_until, res_request, - addr, + src, ); } } @@ -397,7 +397,7 @@ fn queue_response( let mut cursor = Cursor::new(&mut buffers[buffer_index][..]); - match response.write(&mut cursor, ip_version_from_ip(addr.ip())) { + match response.write(&mut cursor) { Ok(()) => { iovs[buffer_index].iov_len = cursor.position() as usize; diff --git a/aquatic_udp_bench/src/announce.rs b/aquatic_udp_bench/src/announce.rs index bcc84e8..1354bdd 100644 --- a/aquatic_udp_bench/src/announce.rs +++ b/aquatic_udp_bench/src/announce.rs @@ -42,7 +42,7 @@ pub fn bench_announce_handler( .unwrap(); } - while let Ok((ConnectedResponse::Announce(r), _)) = 
response_receiver.try_recv() { + while let Ok((ConnectedResponse::AnnounceIpv4(r), _)) = response_receiver.try_recv() { num_responses += 1; if let Some(last_peer) = r.peers.last() { @@ -54,7 +54,7 @@ pub fn bench_announce_handler( let total = bench_config.num_announce_requests * (round + 1); while num_responses < total { - if let Ok((ConnectedResponse::Announce(r), _)) = response_receiver.recv() { + if let Ok((ConnectedResponse::AnnounceIpv4(r), _)) = response_receiver.recv() { num_responses += 1; if let Some(last_peer) = r.peers.last() { diff --git a/aquatic_udp_load_test/src/handler.rs b/aquatic_udp_load_test/src/handler.rs index 77180f7..e690ca4 100644 --- a/aquatic_udp_load_test/src/handler.rs +++ b/aquatic_udp_load_test/src/handler.rs @@ -165,7 +165,14 @@ fn process_response( Some(request) } - Response::Announce(r) => if_torrent_peer_move_and_create_random_request( + Response::AnnounceIpv4(r) => if_torrent_peer_move_and_create_random_request( + config, + rng, + info_hashes, + torrent_peers, + r.transaction_id, + ), + Response::AnnounceIpv6(r) => if_torrent_peer_move_and_create_random_request( config, rng, info_hashes, diff --git a/aquatic_udp_load_test/src/network.rs b/aquatic_udp_load_test/src/network.rs index 0f0269f..358a27b 100644 --- a/aquatic_udp_load_test/src/network.rs +++ b/aquatic_udp_load_test/src/network.rs @@ -129,7 +129,11 @@ fn read_responses( match Response::from_bytes(&buffer[0..amt]) { Ok(response) => { match response { - Response::Announce(ref r) => { + Response::AnnounceIpv4(ref r) => { + ls.responses_announce += 1; + ls.response_peers += r.peers.len(); + } + Response::AnnounceIpv6(ref r) => { ls.responses_announce += 1; ls.response_peers += r.peers.len(); } diff --git a/aquatic_udp_protocol/src/common.rs b/aquatic_udp_protocol/src/common.rs index ac56e18..6070e29 100644 --- a/aquatic_udp_protocol/src/common.rs +++ b/aquatic_udp_protocol/src/common.rs @@ -1,4 +1,4 @@ -use std::net::IpAddr; +use std::net::{Ipv4Addr, Ipv6Addr}; 
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub enum IpVersion { @@ -37,8 +37,14 @@ pub struct PeerId(pub [u8; 20]); pub struct PeerKey(pub u32); #[derive(Hash, PartialEq, Eq, Clone, Debug)] -pub struct ResponsePeer { - pub ip_address: IpAddr, +pub struct ResponsePeerIpv4 { + pub ip_address: Ipv4Addr, + pub port: Port, +} + +#[derive(Hash, PartialEq, Eq, Clone, Debug)] +pub struct ResponsePeerIpv6 { + pub ip_address: Ipv6Addr, pub port: Port, } @@ -80,11 +86,21 @@ impl quickcheck::Arbitrary for PeerId { } #[cfg(test)] -impl quickcheck::Arbitrary for ResponsePeer { +impl quickcheck::Arbitrary for ResponsePeerIpv4 { fn arbitrary(g: &mut quickcheck::Gen) -> Self { Self { - ip_address: ::std::net::IpAddr::arbitrary(g), - port: Port(u16::arbitrary(g)), + ip_address: quickcheck::Arbitrary::arbitrary(g), + port: Port(u16::arbitrary(g).into()), + } + } +} + +#[cfg(test)] +impl quickcheck::Arbitrary for ResponsePeerIpv6 { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + Self { + ip_address: quickcheck::Arbitrary::arbitrary(g), + port: Port(u16::arbitrary(g).into()), } } } diff --git a/aquatic_udp_protocol/src/response.rs b/aquatic_udp_protocol/src/response.rs index b8a514a..99f3afa 100644 --- a/aquatic_udp_protocol/src/response.rs +++ b/aquatic_udp_protocol/src/response.rs @@ -1,7 +1,7 @@ use std::borrow::Cow; use std::convert::TryInto; use std::io::{self, Cursor, Write}; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::net::{Ipv4Addr, Ipv6Addr}; use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt}; @@ -21,12 +21,21 @@ pub struct ConnectResponse { } #[derive(PartialEq, Eq, Clone, Debug)] -pub struct AnnounceResponse { +pub struct AnnounceResponseIpv4 { pub transaction_id: TransactionId, pub announce_interval: AnnounceInterval, pub leechers: NumberOfPeers, pub seeders: NumberOfPeers, - pub peers: Vec, + pub peers: Vec, +} + +#[derive(PartialEq, Eq, Clone, Debug)] +pub struct AnnounceResponseIpv6 { + pub transaction_id: TransactionId, + pub 
announce_interval: AnnounceInterval, + pub leechers: NumberOfPeers, + pub seeders: NumberOfPeers, + pub peers: Vec, } #[derive(PartialEq, Eq, Clone, Debug)] @@ -44,7 +53,8 @@ pub struct ErrorResponse { #[derive(PartialEq, Eq, Clone, Debug)] pub enum Response { Connect(ConnectResponse), - Announce(AnnounceResponse), + AnnounceIpv4(AnnounceResponseIpv4), + AnnounceIpv6(AnnounceResponseIpv6), Scrape(ScrapeResponse), Error(ErrorResponse), } @@ -55,9 +65,15 @@ impl From for Response { } } -impl From for Response { - fn from(r: AnnounceResponse) -> Self { - Self::Announce(r) +impl From for Response { + fn from(r: AnnounceResponseIpv4) -> Self { + Self::AnnounceIpv4(r) + } +} + +impl From for Response { + fn from(r: AnnounceResponseIpv6) -> Self { + Self::AnnounceIpv6(r) } } @@ -81,42 +97,23 @@ impl Response { /// addresses. Clients seem not to support it very well, but due to a lack /// of alternative solutions, it is implemented here. #[inline] - pub fn write(self, bytes: &mut impl Write, ip_version: IpVersion) -> Result<(), io::Error> { + pub fn write(self, bytes: &mut impl Write) -> Result<(), io::Error> { match self { Response::Connect(r) => { bytes.write_i32::(0)?; bytes.write_i32::(r.transaction_id.0)?; bytes.write_i64::(r.connection_id.0)?; } - Response::Announce(r) => { - if ip_version == IpVersion::IPv4 { - bytes.write_i32::(1)?; - bytes.write_i32::(r.transaction_id.0)?; - bytes.write_i32::(r.announce_interval.0)?; - bytes.write_i32::(r.leechers.0)?; - bytes.write_i32::(r.seeders.0)?; + Response::AnnounceIpv4(r) => { + bytes.write_i32::(1)?; + bytes.write_i32::(r.transaction_id.0)?; + bytes.write_i32::(r.announce_interval.0)?; + bytes.write_i32::(r.leechers.0)?; + bytes.write_i32::(r.seeders.0)?; - // Silently ignore peers with wrong IP version - for peer in r.peers { - if let IpAddr::V4(ip) = peer.ip_address { - bytes.write_all(&ip.octets())?; - bytes.write_u16::(peer.port.0)?; - } - } - } else { - bytes.write_i32::(4)?; - 
bytes.write_i32::(r.transaction_id.0)?; - bytes.write_i32::(r.announce_interval.0)?; - bytes.write_i32::(r.leechers.0)?; - bytes.write_i32::(r.seeders.0)?; - - // Silently ignore peers with wrong IP version - for peer in r.peers { - if let IpAddr::V6(ip) = peer.ip_address { - bytes.write_all(&ip.octets())?; - bytes.write_u16::(peer.port.0)?; - } - } + for peer in r.peers { + bytes.write_all(&peer.ip_address.octets())?; + bytes.write_u16::(peer.port.0)?; } } Response::Scrape(r) => { @@ -135,6 +132,18 @@ impl Response { bytes.write_all(r.message.as_bytes())?; } + Response::AnnounceIpv6(r) => { + bytes.write_i32::(4)?; + bytes.write_i32::(r.transaction_id.0)?; + bytes.write_i32::(r.announce_interval.0)?; + bytes.write_i32::(r.leechers.0)?; + bytes.write_i32::(r.seeders.0)?; + + for peer in r.peers { + bytes.write_all(&peer.ip_address.octets())?; + bytes.write_u16::(peer.port.0)?; + } + } } Ok(()) @@ -171,17 +180,17 @@ impl Response { .chunks_exact(6) .map(|chunk| { let ip_bytes: [u8; 4] = (&chunk[..4]).try_into().unwrap(); - let ip_address = IpAddr::V4(Ipv4Addr::from(ip_bytes)); + let ip_address = Ipv4Addr::from(ip_bytes); let port = (&chunk[4..]).read_u16::().unwrap(); - ResponsePeer { + ResponsePeerIpv4 { ip_address, port: Port(port), } }) .collect(); - Ok((AnnounceResponse { + Ok((AnnounceResponseIpv4 { transaction_id: TransactionId(transaction_id), announce_interval: AnnounceInterval(announce_interval), leechers: NumberOfPeers(leechers), @@ -244,17 +253,17 @@ impl Response { .chunks_exact(18) .map(|chunk| { let ip_bytes: [u8; 16] = (&chunk[..16]).try_into().unwrap(); - let ip_address = IpAddr::V6(Ipv6Addr::from(ip_bytes)); + let ip_address = Ipv6Addr::from(ip_bytes); let port = (&chunk[16..]).read_u16::().unwrap(); - ResponsePeer { + ResponsePeerIpv6 { ip_address, port: Port(port), } }) .collect(); - Ok((AnnounceResponse { + Ok((AnnounceResponseIpv6 { transaction_id: TransactionId(transaction_id), announce_interval: AnnounceInterval(announce_interval), leechers: 
NumberOfPeers(leechers), @@ -297,10 +306,26 @@ mod tests { } } - impl quickcheck::Arbitrary for AnnounceResponse { + impl quickcheck::Arbitrary for AnnounceResponseIpv4 { fn arbitrary(g: &mut quickcheck::Gen) -> Self { let peers = (0..u8::arbitrary(g)) - .map(|_| ResponsePeer::arbitrary(g)) + .map(|_| ResponsePeerIpv4::arbitrary(g)) + .collect(); + + Self { + transaction_id: TransactionId(i32::arbitrary(g)), + announce_interval: AnnounceInterval(i32::arbitrary(g)), + leechers: NumberOfPeers(i32::arbitrary(g)), + seeders: NumberOfPeers(i32::arbitrary(g)), + peers, + } + } + } + + impl quickcheck::Arbitrary for AnnounceResponseIpv6 { + fn arbitrary(g: &mut quickcheck::Gen) -> Self { + let peers = (0..u8::arbitrary(g)) + .map(|_| ResponsePeerIpv6::arbitrary(g)) .collect(); Self { @@ -326,10 +351,10 @@ mod tests { } } - fn same_after_conversion(response: Response, ip_version: IpVersion) -> bool { + fn same_after_conversion(response: Response) -> bool { let mut buf = Vec::new(); - response.clone().write(&mut buf, ip_version).unwrap(); + response.clone().write(&mut buf).unwrap(); let r2 = Response::from_bytes(&buf[..]).unwrap(); let success = response == r2; @@ -343,24 +368,21 @@ mod tests { #[quickcheck] fn test_connect_response_convert_identity(response: ConnectResponse) -> bool { - same_after_conversion(response.into(), IpVersion::IPv4) + same_after_conversion(response.into()) } #[quickcheck] - fn test_announce_response_convert_identity(data: (AnnounceResponse, IpVersion)) -> bool { - let mut r = data.0; + fn test_announce_response_ipv4_convert_identity(response: AnnounceResponseIpv4) -> bool { + same_after_conversion(response.into()) + } - if data.1 == IpVersion::IPv4 { - r.peers.retain(|peer| peer.ip_address.is_ipv4()); - } else { - r.peers.retain(|peer| peer.ip_address.is_ipv6()); - } - - same_after_conversion(r.into(), data.1) + #[quickcheck] + fn test_announce_response_ipv6_convert_identity(response: AnnounceResponseIpv6) -> bool { + 
same_after_conversion(response.into()) } #[quickcheck] fn test_scrape_response_convert_identity(response: ScrapeResponse) -> bool { - same_after_conversion(response.into(), IpVersion::IPv4) + same_after_conversion(response.into()) } } From 6bd63c2f5d4671fa7a1d25e108af95e5163403c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Mon, 15 Nov 2021 22:23:55 +0100 Subject: [PATCH 14/56] udp: clean up --- aquatic_udp/src/lib/common/mod.rs | 12 ------------ aquatic_udp_protocol/src/common.rs | 17 ----------------- 2 files changed, 29 deletions(-) diff --git a/aquatic_udp/src/lib/common/mod.rs b/aquatic_udp/src/lib/common/mod.rs index 41c52e8..b0674ff 100644 --- a/aquatic_udp/src/lib/common/mod.rs +++ b/aquatic_udp/src/lib/common/mod.rs @@ -269,18 +269,6 @@ impl Default for State { } } } -pub fn ip_version_from_ip(ip: IpAddr) -> IpVersion { - match ip { - IpAddr::V4(_) => IpVersion::IPv4, - IpAddr::V6(ip) => { - if let [0, 0, 0, 0, 0, 0xffff, ..] = ip.segments() { - IpVersion::IPv4 - } else { - IpVersion::IPv6 - } - } - } -} #[cfg(test)] mod tests { diff --git a/aquatic_udp_protocol/src/common.rs b/aquatic_udp_protocol/src/common.rs index 6070e29..77192c6 100644 --- a/aquatic_udp_protocol/src/common.rs +++ b/aquatic_udp_protocol/src/common.rs @@ -1,11 +1,5 @@ use std::net::{Ipv4Addr, Ipv6Addr}; -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub enum IpVersion { - IPv4, - IPv6, -} - #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub struct AnnounceInterval(pub i32); @@ -48,17 +42,6 @@ pub struct ResponsePeerIpv6 { pub port: Port, } -#[cfg(test)] -impl quickcheck::Arbitrary for IpVersion { - fn arbitrary(g: &mut quickcheck::Gen) -> Self { - if bool::arbitrary(g) { - IpVersion::IPv4 - } else { - IpVersion::IPv6 - } - } -} - #[cfg(test)] impl quickcheck::Arbitrary for InfoHash { fn arbitrary(g: &mut quickcheck::Gen) -> Self { From 853ed916388f3674a54dff8265193bff3aee01d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= 
Date: Mon, 15 Nov 2021 22:31:44 +0100 Subject: [PATCH 15/56] udp: use only PeerId for peer map key, skip ip address Having the ip to avoid collisions likely doesn't adress any meaningful threat. --- aquatic_udp/src/lib/common/mod.rs | 8 +------ aquatic_udp/src/lib/handlers.rs | 40 +++++++++++++------------------ 2 files changed, 18 insertions(+), 30 deletions(-) diff --git a/aquatic_udp/src/lib/common/mod.rs b/aquatic_udp/src/lib/common/mod.rs index b0674ff..a09d225 100644 --- a/aquatic_udp/src/lib/common/mod.rs +++ b/aquatic_udp/src/lib/common/mod.rs @@ -158,13 +158,7 @@ impl Peer { } } -#[derive(PartialEq, Eq, Hash, Clone, Copy)] -pub struct PeerMapKey { - pub ip: I, - pub peer_id: PeerId, -} - -pub type PeerMap = AHashIndexMap, Peer>; +pub type PeerMap = AHashIndexMap>; pub struct TorrentData { pub peers: PeerMap, diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index 1650a98..d23e7ab 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -138,11 +138,6 @@ fn handle_announce_request_inner( peer_ip: I, peer_valid_until: ValidUntil, ) -> ProtocolAnnounceResponse { - let peer_key = PeerMapKey { - ip: peer_ip, - peer_id: request.peer_id, - }; - let peer_status = PeerStatus::from_event_and_bytes_left(request.event, request.bytes_left); let peer = Peer { @@ -158,14 +153,14 @@ fn handle_announce_request_inner( PeerStatus::Leeching => { torrent_data.num_leechers += 1; - torrent_data.peers.insert(peer_key, peer) + torrent_data.peers.insert(request.peer_id, peer) } PeerStatus::Seeding => { torrent_data.num_seeders += 1; - torrent_data.peers.insert(peer_key, peer) + torrent_data.peers.insert(request.peer_id, peer) } - PeerStatus::Stopped => torrent_data.peers.remove(&peer_key), + PeerStatus::Stopped => torrent_data.peers.remove(&request.peer_id), }; match opt_removed_peer.map(|peer| peer.status) { @@ -184,7 +179,7 @@ fn handle_announce_request_inner( rng, &torrent_data.peers, max_num_peers_to_take, - peer_key, + 
request.peer_id, Peer::to_response_peer, ); @@ -269,22 +264,20 @@ mod tests { use super::*; - fn gen_peer_map_key_and_value(i: u32) -> (PeerMapKey, Peer) { - let ip_address = Ipv4Addr::from(i.to_be_bytes()); - let peer_id = PeerId([0; 20]); + fn gen_peer_id(i: u32) -> PeerId { + let mut peer_id = PeerId([0; 20]); - let key = PeerMapKey { - ip: ip_address, - peer_id, - }; - let value = Peer { - ip_address, + peer_id.0[0..4].copy_from_slice(&i.to_ne_bytes()); + + peer_id + } + fn gen_peer(i: u32) -> Peer { + Peer { + ip_address: Ipv4Addr::from(i.to_be_bytes()), port: Port(1), status: PeerStatus::Leeching, valid_until: ValidUntil::new(0), - }; - - (key, value) + } } #[test] @@ -299,7 +292,8 @@ mod tests { let mut opt_sender_peer = None; for i in 0..gen_num_peers { - let (key, value) = gen_peer_map_key_and_value((i << 16) + i); + let key = gen_peer_id(i); + let value = gen_peer((i << 16) + i); if i == 0 { opt_sender_key = Some(key); @@ -315,7 +309,7 @@ mod tests { &mut rng, &peer_map, req_num_peers, - opt_sender_key.unwrap_or_else(|| gen_peer_map_key_and_value(1).0), + opt_sender_key.unwrap_or_else(|| gen_peer_id(1)), Peer::to_response_peer, ); From 4addb0de49c74bbc2b991d4b597ad404e41747d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Mon, 15 Nov 2021 22:35:49 +0100 Subject: [PATCH 16/56] Update TODO --- TODO.md | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/TODO.md b/TODO.md index 986d9a6..8ae2800 100644 --- a/TODO.md +++ b/TODO.md @@ -17,18 +17,17 @@ * cargo-deny * aquatic_udp + * shard torrent state + * old note that might be useful: + * consider adding ConnectedScrapeRequest::Scrape(PendingScrapeRequest) + containing TransactionId and BTreeMap, and same for + response * uring * ValidUntil periodic update * statistics * shared config keys such as poll interval * mio * stagger connection cleaning intervals? 
- * ipv4-mapped addresses - * glommio - * consider sending local responses immediately - * consider adding ConnectedScrapeRequest::Scrape(PendingScrapeRequest) - containing TransactionId and BTreeMap, and same for - response * aquatic_http: * clean out connections regularly From b617ff9d09e5f8c6a7b42a80f9698046ae0b543c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Tue, 16 Nov 2021 01:03:29 +0100 Subject: [PATCH 17/56] udp: shard request workers by info hash --- aquatic_udp/src/lib/common/mod.rs | 96 +++++++++++++++---- aquatic_udp/src/lib/common/network.rs | 108 ++++++++++++++++++--- aquatic_udp/src/lib/handlers.rs | 127 ++++++++----------------- aquatic_udp/src/lib/lib.rs | 42 ++++++-- aquatic_udp/src/lib/network_mio.rs | 132 ++++++++++++++++---------- aquatic_udp/src/lib/network_uring.rs | 40 +++++--- aquatic_udp_bench/src/announce.rs | 8 +- aquatic_udp_bench/src/main.rs | 14 +-- aquatic_udp_bench/src/scrape.rs | 29 +++--- 9 files changed, 378 insertions(+), 218 deletions(-) diff --git a/aquatic_udp/src/lib/common/mod.rs b/aquatic_udp/src/lib/common/mod.rs index a09d225..72f9862 100644 --- a/aquatic_udp/src/lib/common/mod.rs +++ b/aquatic_udp/src/lib/common/mod.rs @@ -1,9 +1,11 @@ +use std::collections::BTreeMap; use std::hash::Hash; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::atomic::AtomicUsize; use std::sync::Arc; use std::time::Instant; +use crossbeam_channel::Sender; use parking_lot::Mutex; use socket2::{Domain, Protocol, Socket, Type}; @@ -34,35 +36,29 @@ impl Ip for Ipv6Addr { } } +#[derive(Debug)] +pub struct PendingScrapeRequest { + pub transaction_id: TransactionId, + pub info_hashes: BTreeMap, +} + +#[derive(Debug)] +pub struct PendingScrapeResponse { + pub transaction_id: TransactionId, + pub torrent_stats: BTreeMap, +} + #[derive(Debug)] pub enum ConnectedRequest { Announce(AnnounceRequest), - Scrape { - request: ScrapeRequest, - /// Currently only 
used by glommio implementation - original_indices: Vec, - }, + Scrape(PendingScrapeRequest), } #[derive(Debug)] pub enum ConnectedResponse { AnnounceIpv4(AnnounceResponseIpv4), AnnounceIpv6(AnnounceResponseIpv6), - Scrape { - response: ScrapeResponse, - /// Currently only used by glommio implementation - original_indices: Vec, - }, -} - -impl Into for ConnectedResponse { - fn into(self) -> Response { - match self { - Self::AnnounceIpv4(response) => Response::AnnounceIpv4(response), - Self::AnnounceIpv6(response) => Response::AnnounceIpv6(response), - Self::Scrape { response, .. } => Response::Scrape(response), - } - } + Scrape(PendingScrapeResponse), } #[derive(Clone, PartialEq, Debug)] @@ -117,6 +113,64 @@ impl Into for ProtocolAnnounceResponse { } } +#[derive(Clone, Copy, Debug)] +pub struct SocketWorkerIndex(pub usize); + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub struct RequestWorkerIndex(pub usize); + +impl RequestWorkerIndex { + fn from_info_hash(config: &Config, info_hash: InfoHash) -> Self { + Self(info_hash.0[0] as usize % config.request_workers) + } +} + +pub struct ConnectedRequestSender { + index: SocketWorkerIndex, + senders: Vec>, +} + +impl ConnectedRequestSender { + pub fn new( + index: SocketWorkerIndex, + senders: Vec>, + ) -> Self { + Self { index, senders } + } + + pub fn try_send_to( + &self, + index: RequestWorkerIndex, + request: ConnectedRequest, + addr: SocketAddr, + ) { + if let Err(err) = self.senders[index.0].try_send((self.index, request, addr)) { + ::log::warn!("request_sender.try_send failed: {:?}", err) + } + } +} + +pub struct ConnectedResponseSender { + senders: Vec>, +} + +impl ConnectedResponseSender { + pub fn new(senders: Vec>) -> Self { + Self { senders } + } + + pub fn try_send_to( + &self, + index: SocketWorkerIndex, + response: ConnectedResponse, + addr: SocketAddr, + ) { + if let Err(err) = self.senders[index.0].try_send((response, addr)) { + ::log::warn!("request_sender.try_send failed: {:?}", err) + } + } 
+} + #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub enum PeerStatus { Seeding, diff --git a/aquatic_udp/src/lib/common/network.rs b/aquatic_udp/src/lib/common/network.rs index 053a7da..ee3ca4f 100644 --- a/aquatic_udp/src/lib/common/network.rs +++ b/aquatic_udp/src/lib/common/network.rs @@ -4,7 +4,6 @@ use aquatic_common::access_list::AccessListCache; use aquatic_common::AHashIndexMap; use aquatic_common::ValidUntil; use aquatic_udp_protocol::*; -use crossbeam_channel::Sender; use rand::{prelude::StdRng, Rng}; use crate::common::*; @@ -34,12 +33,75 @@ impl ConnectionMap { } } +pub struct PendingScrapeResponseMeta { + num_pending: usize, + valid_until: ValidUntil, +} + +#[derive(Default)] +pub struct PendingScrapeResponseMap( + AHashIndexMap, +); + +impl PendingScrapeResponseMap { + pub fn prepare( + &mut self, + transaction_id: TransactionId, + num_pending: usize, + valid_until: ValidUntil, + ) { + let meta = PendingScrapeResponseMeta { + num_pending, + valid_until, + }; + let response = PendingScrapeResponse { + transaction_id, + torrent_stats: BTreeMap::new(), + }; + + self.0.insert(transaction_id, (meta, response)); + } + + pub fn add_and_get_finished(&mut self, response: PendingScrapeResponse) -> Option { + let finished = if let Some(r) = self.0.get_mut(&response.transaction_id) { + r.0.num_pending -= 1; + + r.1.torrent_stats.extend(response.torrent_stats.into_iter()); + + r.0.num_pending == 0 + } else { + ::log::warn!("PendingScrapeResponses.add didn't find PendingScrapeResponse in map"); + + false + }; + + if finished { + let response = self.0.remove(&response.transaction_id).unwrap().1; + + Some(Response::Scrape(ScrapeResponse { + transaction_id: response.transaction_id, + torrent_stats: response.torrent_stats.into_values().collect(), + })) + } else { + None + } + } + + pub fn clean(&mut self) { + let now = Instant::now(); + + self.0.retain(|_, v| v.0.valid_until.0 > now); + self.0.shrink_to_fit(); + } +} + pub fn handle_request( config: &Config, 
connections: &mut ConnectionMap, + pending_scrape_responses: &mut PendingScrapeResponseMap, access_list_cache: &mut AccessListCache, rng: &mut StdRng, - request_sender: &Sender<(ConnectedRequest, SocketAddr)>, + request_sender: &ConnectedRequestSender, local_responses: &mut Vec<(Response, SocketAddr)>, valid_until: ValidUntil, res_request: Result, @@ -66,11 +128,14 @@ pub fn handle_request( .load() .allows(access_list_mode, &request.info_hash.0) { - if let Err(err) = - request_sender.try_send((ConnectedRequest::Announce(request), src)) - { - ::log::warn!("request_sender.try_send failed: {:?}", err) - } + let worker_index = + RequestWorkerIndex::from_info_hash(config, request.info_hash); + + request_sender.try_send_to( + worker_index, + ConnectedRequest::Announce(request), + src, + ); } else { let response = Response::Error(ErrorResponse { transaction_id: request.transaction_id, @@ -83,13 +148,30 @@ pub fn handle_request( } Ok(Request::Scrape(request)) => { if connections.contains(request.connection_id, src) { - let request = ConnectedRequest::Scrape { - request, - original_indices: Vec::new(), - }; + let mut requests: AHashIndexMap = + Default::default(); - if let Err(err) = request_sender.try_send((request, src)) { - ::log::warn!("request_sender.try_send failed: {:?}", err) + let transaction_id = request.transaction_id; + + for (i, info_hash) in request.info_hashes.into_iter().enumerate() { + let pending = requests + .entry(RequestWorkerIndex::from_info_hash(&config, info_hash)) + .or_insert_with(|| PendingScrapeRequest { + transaction_id, + info_hashes: BTreeMap::new(), + }); + + pending.info_hashes.insert(i, info_hash); + } + + pending_scrape_responses.prepare(transaction_id, requests.len(), valid_until); + + for (request_worker_index, request) in requests { + request_sender.try_send_to( + request_worker_index, + ConnectedRequest::Scrape(request), + src, + ); } } } diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index 
d23e7ab..44c934d 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -1,12 +1,12 @@ +use std::collections::BTreeMap; use std::net::IpAddr; use std::net::SocketAddr; use std::time::Duration; use aquatic_common::ValidUntil; -use crossbeam_channel::{Receiver, Sender}; +use crossbeam_channel::Receiver; use rand::{rngs::SmallRng, SeedableRng}; -use aquatic_common::convert_ipv4_mapped_ipv6; use aquatic_common::extract_response_peers; use aquatic_udp_protocol::*; @@ -15,88 +15,37 @@ use crate::common::*; use crate::config::Config; pub fn run_request_worker( - state: State, config: Config, - request_receiver: Receiver<(ConnectedRequest, SocketAddr)>, - response_sender: Sender<(ConnectedResponse, SocketAddr)>, + request_receiver: Receiver<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>, + response_sender: ConnectedResponseSender, ) { - let mut announce_requests: Vec<(AnnounceRequest, SocketAddr)> = Vec::new(); - let mut scrape_requests: Vec<(ScrapeRequest, SocketAddr)> = Vec::new(); - let mut responses: Vec<(ConnectedResponse, SocketAddr)> = Vec::new(); - + let mut torrents = TorrentMaps::default(); let mut small_rng = SmallRng::from_entropy(); let timeout = Duration::from_micros(config.handlers.channel_recv_timeout_microseconds); loop { - let mut opt_torrents = None; - - // Collect requests from channel, divide them by type - // - // Collect a maximum number of request. Stop collecting before that - // number is reached if having waited for too long for a request, but - // only if TorrentMaps mutex isn't locked. 
- for i in 0..config.handlers.max_requests_per_iter { - let (request, src): (ConnectedRequest, SocketAddr) = if i == 0 { - match request_receiver.recv() { - Ok(r) => r, - Err(_) => break, // Really shouldn't happen - } - } else { - match request_receiver.recv_timeout(timeout) { - Ok(r) => r, - Err(_) => { - if let Some(guard) = state.torrents.try_lock() { - opt_torrents = Some(guard); - - break; - } else { - continue; - } - } - } - }; - - match request { - ConnectedRequest::Announce(request) => announce_requests.push((request, src)), - ConnectedRequest::Scrape { request, .. } => scrape_requests.push((request, src)), - } - } - - // Generate responses for announce and scrape requests, then drop MutexGuard. - { - let mut torrents = opt_torrents.unwrap_or_else(|| state.torrents.lock()); - + if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) { let peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age); - responses.extend(announce_requests.drain(..).map(|(request, src)| { - let response = handle_announce_request( + let response = match request { + ConnectedRequest::Announce(request) => handle_announce_request( &config, &mut small_rng, &mut torrents, request, src, peer_valid_until, - ); + ), + ConnectedRequest::Scrape(request) => { + ConnectedResponse::Scrape(handle_scrape_request(&mut torrents, src, request)) + } + }; - (response, src) - })); - - responses.extend(scrape_requests.drain(..).map(|(request, src)| { - let response = ConnectedResponse::Scrape { - response: handle_scrape_request(&mut torrents, src, request), - original_indices: Vec::new(), - }; - - (response, src) - })); + response_sender.try_send_to(sender_index, response, src); } - for r in responses.drain(..) 
{ - if let Err(err) = response_sender.send(r) { - ::log::error!("error sending response to channel: {}", err); - } - } + // TODO: clean torrent map, update peer_valid_until } } @@ -207,41 +156,43 @@ fn calc_max_num_peers_to_take(config: &Config, peers_wanted: i32) -> usize { pub fn handle_scrape_request( torrents: &mut TorrentMaps, src: SocketAddr, - request: ScrapeRequest, -) -> ScrapeResponse { + request: PendingScrapeRequest, +) -> PendingScrapeResponse { const EMPTY_STATS: TorrentScrapeStatistics = create_torrent_scrape_statistics(0, 0); - let mut stats: Vec = Vec::with_capacity(request.info_hashes.len()); + let mut torrent_stats: BTreeMap = BTreeMap::new(); - let peer_ip = convert_ipv4_mapped_ipv6(src.ip()); - - if peer_ip.is_ipv4() { - for info_hash in request.info_hashes.iter() { - if let Some(torrent_data) = torrents.ipv4.get(info_hash) { - stats.push(create_torrent_scrape_statistics( + if src.ip().is_ipv4() { + torrent_stats.extend(request.info_hashes.into_iter().map(|(i, info_hash)| { + let s = if let Some(torrent_data) = torrents.ipv4.get(&info_hash) { + create_torrent_scrape_statistics( torrent_data.num_seeders as i32, torrent_data.num_leechers as i32, - )); + ) } else { - stats.push(EMPTY_STATS); - } - } + EMPTY_STATS + }; + + (i, s) + })); } else { - for info_hash in request.info_hashes.iter() { - if let Some(torrent_data) = torrents.ipv6.get(info_hash) { - stats.push(create_torrent_scrape_statistics( + torrent_stats.extend(request.info_hashes.into_iter().map(|(i, info_hash)| { + let s = if let Some(torrent_data) = torrents.ipv6.get(&info_hash) { + create_torrent_scrape_statistics( torrent_data.num_seeders as i32, torrent_data.num_leechers as i32, - )); + ) } else { - stats.push(EMPTY_STATS); - } - } + EMPTY_STATS + }; + + (i, s) + })); } - ScrapeResponse { + PendingScrapeResponse { transaction_id: request.transaction_id, - torrent_stats: stats, + torrent_stats, } } diff --git a/aquatic_udp/src/lib/lib.rs b/aquatic_udp/src/lib/lib.rs index 
a08b862..e321e95 100644 --- a/aquatic_udp/src/lib/lib.rs +++ b/aquatic_udp/src/lib/lib.rs @@ -9,6 +9,7 @@ pub mod tasks; use config::Config; +use std::collections::BTreeMap; use std::sync::{atomic::AtomicUsize, Arc}; use std::thread::Builder; use std::time::Duration; @@ -23,7 +24,7 @@ use aquatic_common::access_list::update_access_list; use signal_hook::consts::SIGUSR1; use signal_hook::iterator::Signals; -use common::State; +use common::{ConnectedRequestSender, ConnectedResponseSender, SocketWorkerIndex, State}; pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker"; @@ -63,14 +64,30 @@ pub fn run(config: Config) -> ::anyhow::Result<()> { pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { let num_bound_sockets = Arc::new(AtomicUsize::new(0)); - let (request_sender, request_receiver) = unbounded(); - let (response_sender, response_receiver) = unbounded(); + let mut request_senders = Vec::new(); + let mut request_receivers = BTreeMap::new(); + + let mut response_senders = Vec::new(); + let mut response_receivers = BTreeMap::new(); + + for i in 0..config.request_workers { + let (request_sender, request_receiver) = unbounded(); + + request_senders.push(request_sender); + request_receivers.insert(i, request_receiver); + } + + for i in 0..config.socket_workers { + let (response_sender, response_receiver) = unbounded(); + + response_senders.push(response_sender); + response_receivers.insert(i, response_receiver); + } for i in 0..config.request_workers { - let state = state.clone(); let config = config.clone(); - let request_receiver = request_receiver.clone(); - let response_sender = response_sender.clone(); + let request_receiver = request_receivers.remove(&i).unwrap().clone(); + let response_sender = ConnectedResponseSender::new(response_senders.clone()); Builder::new() .name(format!("request-{:02}", i + 1)) @@ -82,7 +99,7 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { WorkerIndex::RequestWorker(i), ); - 
handlers::run_request_worker(state, config, request_receiver, response_sender) + handlers::run_request_worker(config, request_receiver, response_sender) }) .with_context(|| "spawn request worker")?; } @@ -90,8 +107,9 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { for i in 0..config.socket_workers { let state = state.clone(); let config = config.clone(); - let request_sender = request_sender.clone(); - let response_receiver = response_receiver.clone(); + let request_sender = + ConnectedRequestSender::new(SocketWorkerIndex(i), request_senders.clone()); + let response_receiver = response_receivers.remove(&i).unwrap(); let num_bound_sockets = num_bound_sockets.clone(); Builder::new() @@ -128,6 +146,12 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { .with_context(|| "spawn socket worker")?; } + ::std::mem::drop(request_senders); + ::std::mem::drop(request_receivers); + + ::std::mem::drop(response_senders); + ::std::mem::drop(response_receivers); + if config.statistics.interval != 0 { let state = state.clone(); let config = config.clone(); diff --git a/aquatic_udp/src/lib/network_mio.rs b/aquatic_udp/src/lib/network_mio.rs index 35769cf..5e4cac1 100644 --- a/aquatic_udp/src/lib/network_mio.rs +++ b/aquatic_udp/src/lib/network_mio.rs @@ -9,7 +9,7 @@ use std::vec::Drain; use aquatic_common::access_list::create_access_list_cache; use aquatic_common::ValidUntil; -use crossbeam_channel::{Receiver, Sender}; +use crossbeam_channel::Receiver; use mio::net::UdpSocket; use mio::{Events, Interest, Poll, Token}; use rand::prelude::{SeedableRng, StdRng}; @@ -24,7 +24,7 @@ pub fn run_socket_worker( state: State, config: Config, token_num: usize, - request_sender: Sender<(ConnectedRequest, SocketAddr)>, + request_sender: ConnectedRequestSender, response_receiver: Receiver<(ConnectedResponse, SocketAddr)>, num_bound_sockets: Arc, ) { @@ -44,6 +44,7 @@ pub fn run_socket_worker( let mut events = 
Events::with_capacity(config.network.poll_event_capacity); let mut connections = ConnectionMap::default(); + let mut pending_scrape_responses = PendingScrapeResponseMap::default(); let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new(); @@ -66,6 +67,7 @@ pub fn run_socket_worker( &config, &state, &mut connections, + &mut pending_scrape_responses, &mut rng, &mut socket, &mut buffer, @@ -81,6 +83,7 @@ pub fn run_socket_worker( &mut socket, &mut buffer, &response_receiver, + &mut pending_scrape_responses, local_responses.drain(..), ); @@ -103,10 +106,11 @@ fn read_requests( config: &Config, state: &State, connections: &mut ConnectionMap, + pending_scrape_responses: &mut PendingScrapeResponseMap, rng: &mut StdRng, socket: &mut UdpSocket, buffer: &mut [u8], - request_sender: &Sender<(ConnectedRequest, SocketAddr)>, + request_sender: &ConnectedRequestSender, local_responses: &mut Vec<(Response, SocketAddr)>, ) { let mut requests_received: usize = 0; @@ -147,6 +151,7 @@ fn read_requests( handle_request( config, connections, + pending_scrape_responses, &mut access_list_cache, rng, request_sender, @@ -185,60 +190,41 @@ fn send_responses( socket: &mut UdpSocket, buffer: &mut [u8], response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>, + pending_scrape_responses: &mut PendingScrapeResponseMap, local_responses: Drain<(Response, SocketAddr)>, ) { let mut responses_sent: usize = 0; let mut bytes_sent: usize = 0; - let mut cursor = Cursor::new(buffer); + for (response, addr) in local_responses { + send_response( + config, + socket, + buffer, + &mut responses_sent, + &mut bytes_sent, + response, + addr, + ); + } - let response_iterator = local_responses.into_iter().chain( - response_receiver - .try_iter() - .map(|(response, addr)| (response.into(), addr)), - ); - - for (response, addr) in response_iterator { - cursor.set_position(0); - - let addr = if config.network.address.is_ipv4() { - if let SocketAddr::V4(addr) = addr { - SocketAddr::V4(addr) - } else { - 
unreachable!() - } - } else { - match addr { - SocketAddr::V4(addr) => { - let ip = addr.ip().to_ipv6_mapped(); - - SocketAddr::V6(SocketAddrV6::new(ip, addr.port(), 0, 0)) - } - addr => addr, - } + for (response, addr) in response_receiver.try_iter() { + let opt_response = match response { + ConnectedResponse::Scrape(r) => pending_scrape_responses.add_and_get_finished(r), + ConnectedResponse::AnnounceIpv4(r) => Some(Response::AnnounceIpv4(r)), + ConnectedResponse::AnnounceIpv6(r) => Some(Response::AnnounceIpv6(r)), }; - match response.write(&mut cursor) { - Ok(()) => { - let amt = cursor.position() as usize; - - match socket.send_to(&cursor.get_ref()[..amt], addr) { - Ok(amt) => { - responses_sent += 1; - bytes_sent += amt; - } - Err(err) => { - if err.kind() == ErrorKind::WouldBlock { - break; - } - - ::log::info!("send_to error: {}", err); - } - } - } - Err(err) => { - ::log::error!("Response::write error: {:?}", err); - } + if let Some(response) = opt_response { + send_response( + config, + socket, + buffer, + &mut responses_sent, + &mut bytes_sent, + response, + addr, + ); } } @@ -253,3 +239,51 @@ fn send_responses( .fetch_add(bytes_sent, Ordering::SeqCst); } } + +fn send_response( + config: &Config, + socket: &mut UdpSocket, + buffer: &mut [u8], + responses_sent: &mut usize, + bytes_sent: &mut usize, + response: Response, + addr: SocketAddr, +) { + let mut cursor = Cursor::new(buffer); + + let addr = if config.network.address.is_ipv4() { + if let SocketAddr::V4(addr) = addr { + SocketAddr::V4(addr) + } else { + unreachable!() + } + } else { + match addr { + SocketAddr::V4(addr) => { + let ip = addr.ip().to_ipv6_mapped(); + + SocketAddr::V6(SocketAddrV6::new(ip, addr.port(), 0, 0)) + } + addr => addr, + } + }; + + match response.write(&mut cursor) { + Ok(()) => { + let amt = cursor.position() as usize; + + match socket.send_to(&cursor.get_ref()[..amt], addr) { + Ok(amt) => { + *responses_sent += 1; + *bytes_sent += amt; + } + Err(err) => { + 
::log::info!("send_to error: {}", err); + } + } + } + Err(err) => { + ::log::error!("Response::write error: {:?}", err); + } + } +} diff --git a/aquatic_udp/src/lib/network_uring.rs b/aquatic_udp/src/lib/network_uring.rs index 7d56705..63be9cc 100644 --- a/aquatic_udp/src/lib/network_uring.rs +++ b/aquatic_udp/src/lib/network_uring.rs @@ -11,7 +11,7 @@ use std::time::{Duration, Instant}; use aquatic_common::access_list::create_access_list_cache; use aquatic_common::ValidUntil; -use crossbeam_channel::{Receiver, Sender}; +use crossbeam_channel::Receiver; use io_uring::types::{Fixed, Timespec}; use io_uring::SubmissionQueue; use libc::{ @@ -103,7 +103,7 @@ impl Into for UserData { pub fn run_socket_worker( state: State, config: Config, - request_sender: Sender<(ConnectedRequest, SocketAddr)>, + request_sender: ConnectedRequestSender, response_receiver: Receiver<(ConnectedResponse, SocketAddr)>, num_bound_sockets: Arc, ) { @@ -114,6 +114,7 @@ pub fn run_socket_worker( num_bound_sockets.fetch_add(1, Ordering::SeqCst); let mut connections = ConnectionMap::default(); + let mut pending_scrape_responses = PendingScrapeResponseMap::default(); let mut access_list_cache = create_access_list_cache(&state.access_list); let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new(); @@ -252,6 +253,7 @@ pub fn run_socket_worker( handle_request( &config, &mut connections, + &mut pending_scrape_responses, &mut access_list_cache, &mut rng, &request_sender, @@ -333,19 +335,27 @@ pub fn run_socket_worker( .try_iter() .take(MAX_SEND_EVENTS - send_entries.len()) { - queue_response( - &config, - &mut sq, - fd, - &mut send_entries, - &mut buffers, - &mut iovs, - &mut sockaddrs_ipv4, - &mut sockaddrs_ipv6, - &mut msghdrs, - response.into(), - addr, - ); + let opt_response = match response { + ConnectedResponse::Scrape(r) => pending_scrape_responses.add_and_get_finished(r), + ConnectedResponse::AnnounceIpv4(r) => Some(Response::AnnounceIpv4(r)), + ConnectedResponse::AnnounceIpv6(r) => 
Some(Response::AnnounceIpv6(r)), + }; + + if let Some(response) = opt_response { + queue_response( + &config, + &mut sq, + fd, + &mut send_entries, + &mut buffers, + &mut iovs, + &mut sockaddrs_ipv4, + &mut sockaddrs_ipv6, + &mut msghdrs, + response, + addr, + ); + } } if iter_counter % 32 == 0 { diff --git a/aquatic_udp_bench/src/announce.rs b/aquatic_udp_bench/src/announce.rs index 1354bdd..756bac7 100644 --- a/aquatic_udp_bench/src/announce.rs +++ b/aquatic_udp_bench/src/announce.rs @@ -16,7 +16,7 @@ use crate::config::BenchConfig; pub fn bench_announce_handler( bench_config: &BenchConfig, aquatic_config: &Config, - request_sender: &Sender<(ConnectedRequest, SocketAddr)>, + request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>, response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>, rng: &mut impl Rng, info_hashes: &[InfoHash], @@ -38,7 +38,11 @@ pub fn bench_announce_handler( for request_chunk in requests.chunks(p) { for (request, src) in request_chunk { request_sender - .send((ConnectedRequest::Announce(request.clone()), *src)) + .send(( + SocketWorkerIndex(0), + ConnectedRequest::Announce(request.clone()), + *src, + )) .unwrap(); } diff --git a/aquatic_udp_bench/src/main.rs b/aquatic_udp_bench/src/main.rs index fb2895f..13c602e 100644 --- a/aquatic_udp_bench/src/main.rs +++ b/aquatic_udp_bench/src/main.rs @@ -39,21 +39,17 @@ fn main() { pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> { // Setup common state, spawn request handlers - let state = State::default(); let aquatic_config = Config::default(); let (request_sender, request_receiver) = unbounded(); let (response_sender, response_receiver) = unbounded(); - for _ in 0..bench_config.num_threads { - let state = state.clone(); - let config = aquatic_config.clone(); - let request_receiver = request_receiver.clone(); - let response_sender = response_sender.clone(); + let response_sender = ConnectedResponseSender::new(vec![response_sender]); - ::std::thread::spawn(move 
|| { - run_request_worker(state, config, request_receiver, response_sender) - }); + { + let config = aquatic_config.clone(); + + ::std::thread::spawn(move || run_request_worker(config, request_receiver, response_sender)); } // Run benchmarks diff --git a/aquatic_udp_bench/src/scrape.rs b/aquatic_udp_bench/src/scrape.rs index 39d6ade..4cef9c0 100644 --- a/aquatic_udp_bench/src/scrape.rs +++ b/aquatic_udp_bench/src/scrape.rs @@ -16,7 +16,7 @@ use crate::config::BenchConfig; pub fn bench_scrape_handler( bench_config: &BenchConfig, aquatic_config: &Config, - request_sender: &Sender<(ConnectedRequest, SocketAddr)>, + request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>, response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>, rng: &mut impl Rng, info_hashes: &[InfoHash], @@ -42,20 +42,25 @@ pub fn bench_scrape_handler( for round in (0..bench_config.num_rounds).progress_with(pb) { for request_chunk in requests.chunks(p) { for (request, src) in request_chunk { - let request = ConnectedRequest::Scrape { - request: request.clone(), - original_indices: Vec::new(), - }; + let request = ConnectedRequest::Scrape(PendingScrapeRequest { + transaction_id: request.transaction_id, + info_hashes: request + .info_hashes + .clone() + .into_iter() + .enumerate() + .collect(), + }); - request_sender.send((request, *src)).unwrap(); + request_sender + .send((SocketWorkerIndex(0), request, *src)) + .unwrap(); } - while let Ok((ConnectedResponse::Scrape { response, .. }, _)) = - response_receiver.try_recv() - { + while let Ok((ConnectedResponse::Scrape(response), _)) = response_receiver.try_recv() { num_responses += 1; - if let Some(stat) = response.torrent_stats.last() { + if let Some(stat) = response.torrent_stats.values().last() { dummy ^= stat.leechers.0; } } @@ -64,10 +69,10 @@ pub fn bench_scrape_handler( let total = bench_config.num_scrape_requests * (round + 1); while num_responses < total { - if let Ok((ConnectedResponse::Scrape { response, .. 
}, _)) = response_receiver.recv() { + if let Ok((ConnectedResponse::Scrape(response), _)) = response_receiver.recv() { num_responses += 1; - if let Some(stat) = response.torrent_stats.last() { + if let Some(stat) = response.torrent_stats.values().last() { dummy ^= stat.leechers.0; } } From b6f6a2d73b9d2c70e9845dcc94df5d5e4fe12ee8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Tue, 16 Nov 2021 01:59:48 +0100 Subject: [PATCH 18/56] udp: uring: attempt to send more responses per syscall --- aquatic_udp/src/lib/network_uring.rs | 55 +++++++++++++--------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/aquatic_udp/src/lib/network_uring.rs b/aquatic_udp/src/lib/network_uring.rs index 63be9cc..c0c2c64 100644 --- a/aquatic_udp/src/lib/network_uring.rs +++ b/aquatic_udp/src/lib/network_uring.rs @@ -175,8 +175,10 @@ pub fn run_socket_worker( }) .collect(); - let timeout = Timespec::new().nsec(500_000_000); - let mut timeout_set = false; + let timeout = Timespec::new().sec(1); + + let mut force_send_responses = false; + let mut timeout_queued = false; let mut recv_entries = Slab::with_capacity(MAX_RECV_EVENTS); let mut send_entries = Slab::with_capacity(MAX_SEND_EVENTS); @@ -275,7 +277,8 @@ pub fn run_socket_worker( } } UserData::Timeout => { - timeout_set = false; + force_send_responses = true; + timeout_queued = false; } } } @@ -295,8 +298,8 @@ pub fn run_socket_worker( } } - if !timeout_set { - // Setup timer to occasionally check if there are pending responses + if !timeout_queued { + // Setup timer to occasionally force sending of responses let user_data = UserData::Timeout; let timespec_ptr: *const Timespec = &timeout; @@ -309,32 +312,10 @@ pub fn run_socket_worker( sq.push(&entry).unwrap(); } - timeout_set = true; + timeout_queued = true; } - let num_local_to_queue = (MAX_SEND_EVENTS - send_entries.len()).min(local_responses.len()); - - for (response, addr) in local_responses.drain(local_responses.len() - 
num_local_to_queue..) - { - queue_response( - &config, - &mut sq, - fd, - &mut send_entries, - &mut buffers, - &mut iovs, - &mut sockaddrs_ipv4, - &mut sockaddrs_ipv6, - &mut msghdrs, - response, - addr, - ); - } - - for (response, addr) in response_receiver - .try_iter() - .take(MAX_SEND_EVENTS - send_entries.len()) - { + for (response, addr) in response_receiver.try_iter() { let opt_response = match response { ConnectedResponse::Scrape(r) => pending_scrape_responses.add_and_get_finished(r), ConnectedResponse::AnnounceIpv4(r) => Some(Response::AnnounceIpv4(r)), @@ -342,6 +323,18 @@ pub fn run_socket_worker( }; if let Some(response) = opt_response { + local_responses.push((response, addr)); + } + } + + let space_in_send_queue = MAX_SEND_EVENTS - send_entries.len(); + + if force_send_responses | (local_responses.len() >= space_in_send_queue) { + let num_to_queue = (space_in_send_queue).min(local_responses.len()); + let drain_from_index = local_responses.len() - num_to_queue; + + for (response, addr) in local_responses.drain(drain_from_index..) 
+ { queue_response( &config, &mut sq, @@ -356,6 +349,10 @@ pub fn run_socket_worker( addr, ); } + + if local_responses.is_empty() { + force_send_responses = false; + } } if iter_counter % 32 == 0 { From 42d99cefee448ccad81b24216e3a2057e030e1e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Tue, 16 Nov 2021 02:25:51 +0100 Subject: [PATCH 19/56] udp: uring: tune --- aquatic_udp/src/lib/network_uring.rs | 44 +++++++++++++--------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/aquatic_udp/src/lib/network_uring.rs b/aquatic_udp/src/lib/network_uring.rs index c0c2c64..22263b3 100644 --- a/aquatic_udp/src/lib/network_uring.rs +++ b/aquatic_udp/src/lib/network_uring.rs @@ -175,7 +175,7 @@ pub fn run_socket_worker( }) .collect(); - let timeout = Timespec::new().sec(1); + let timeout = Timespec::new().nsec(100_000_000); let mut force_send_responses = false; let mut timeout_queued = false; @@ -298,23 +298,6 @@ pub fn run_socket_worker( } } - if !timeout_queued { - // Setup timer to occasionally force sending of responses - let user_data = UserData::Timeout; - - let timespec_ptr: *const Timespec = &timeout; - - let entry = io_uring::opcode::Timeout::new(timespec_ptr) - .build() - .user_data(user_data.into()); - - unsafe { - sq.push(&entry).unwrap(); - } - - timeout_queued = true; - } - for (response, addr) in response_receiver.try_iter() { let opt_response = match response { ConnectedResponse::Scrape(r) => pending_scrape_responses.add_and_get_finished(r), @@ -355,6 +338,23 @@ pub fn run_socket_worker( } } + if !timeout_queued & !force_send_responses { + // Setup timer to occasionally force sending of responses + let user_data = UserData::Timeout; + + let timespec_ptr: *const Timespec = &timeout; + + let entry = io_uring::opcode::Timeout::new(timespec_ptr) + .build() + .user_data(user_data.into()); + + unsafe { + sq.push(&entry).unwrap(); + } + + timeout_queued = true; + } + if iter_counter % 32 == 0 { let now = Instant::now(); 
@@ -365,12 +365,10 @@ pub fn run_socket_worker( } } - let all_responses_sent = local_responses.is_empty() & response_receiver.is_empty(); - - let wait_for_num = if all_responses_sent { - send_entries.len() + recv_entries.len() - } else { + let wait_for_num = if force_send_responses { send_entries.len() + } else { + send_entries.len() + recv_entries.len() }; sq.sync(); From 5d8a4dd38c65c7f4d1b573a1ef703a89be2c0145 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Tue, 16 Nov 2021 23:03:27 +0100 Subject: [PATCH 20/56] udp load test: default to multiple client ips; improve docs --- aquatic_udp_load_test/src/common.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/aquatic_udp_load_test/src/common.rs b/aquatic_udp_load_test/src/common.rs index 98ef435..6339f32 100644 --- a/aquatic_udp_load_test/src/common.rs +++ b/aquatic_udp_load_test/src/common.rs @@ -40,9 +40,8 @@ pub struct Config { pub struct NetworkConfig { /// True means bind to one localhost IP per socket. /// - /// The point of multiple IPs is to possibly cause a better distribution - /// of requests to servers with SO_REUSEPORT option, but it doesn't - /// necessarily help. + /// The point of multiple IPs is to cause a better distribution + /// of requests to servers with SO_REUSEPORT option. /// /// Setting this to true can cause issues on macOS. 
pub multiple_client_ipv4s: bool, @@ -120,7 +119,7 @@ impl Default for Config { impl Default for NetworkConfig { fn default() -> Self { Self { - multiple_client_ipv4s: false, + multiple_client_ipv4s: true, first_port: 45_000, poll_timeout: 276, poll_event_capacity: 2_877, From 7afaa2702ad92b8055d0eb539edb34fbbfe01972 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Tue, 16 Nov 2021 23:06:29 +0100 Subject: [PATCH 21/56] udp: config: request worker timeout in ms instead of microseconds --- aquatic_udp/src/lib/config.rs | 4 ++-- aquatic_udp/src/lib/handlers.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index 8f2b8a2..9bd5bc7 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -75,7 +75,7 @@ pub struct HandlerConfig { /// Maximum number of requests to receive from channel before locking /// mutex and starting work pub max_requests_per_iter: usize, - pub channel_recv_timeout_microseconds: u64, + pub channel_recv_timeout_ms: u64, } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -143,7 +143,7 @@ impl Default for HandlerConfig { fn default() -> Self { Self { max_requests_per_iter: 10000, - channel_recv_timeout_microseconds: 200, + channel_recv_timeout_ms: 100, } } } diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index 44c934d..1f478c4 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -22,7 +22,7 @@ pub fn run_request_worker( let mut torrents = TorrentMaps::default(); let mut small_rng = SmallRng::from_entropy(); - let timeout = Duration::from_micros(config.handlers.channel_recv_timeout_microseconds); + let timeout = Duration::from_millis(config.handlers.channel_recv_timeout_ms); loop { if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) { From 5ef0935c9778d90ce4b69c7b06daddbb1eec2da5 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Tue, 16 Nov 2021 23:09:13 +0100 Subject: [PATCH 22/56] udp: remove unused config var max_requests_per_iter --- aquatic_udp/src/lib/config.rs | 4 ---- aquatic_udp_bench/src/announce.rs | 4 +--- aquatic_udp_bench/src/main.rs | 2 -- aquatic_udp_bench/src/scrape.rs | 4 +--- 4 files changed, 2 insertions(+), 12 deletions(-) diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index 9bd5bc7..e9b3108 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -72,9 +72,6 @@ pub struct ProtocolConfig { #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct HandlerConfig { - /// Maximum number of requests to receive from channel before locking - /// mutex and starting work - pub max_requests_per_iter: usize, pub channel_recv_timeout_ms: u64, } @@ -142,7 +139,6 @@ impl Default for ProtocolConfig { impl Default for HandlerConfig { fn default() -> Self { Self { - max_requests_per_iter: 10000, channel_recv_timeout_ms: 100, } } diff --git a/aquatic_udp_bench/src/announce.rs b/aquatic_udp_bench/src/announce.rs index 756bac7..1277c4e 100644 --- a/aquatic_udp_bench/src/announce.rs +++ b/aquatic_udp_bench/src/announce.rs @@ -7,7 +7,6 @@ use rand::Rng; use rand_distr::Pareto; use aquatic_udp::common::*; -use aquatic_udp::config::Config; use aquatic_udp_protocol::*; use crate::common::*; @@ -15,7 +14,6 @@ use crate::config::BenchConfig; pub fn bench_announce_handler( bench_config: &BenchConfig, - aquatic_config: &Config, request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>, response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>, rng: &mut impl Rng, @@ -23,7 +21,7 @@ pub fn bench_announce_handler( ) -> (usize, Duration) { let requests = create_requests(rng, info_hashes, bench_config.num_announce_requests); - let p = aquatic_config.handlers.max_requests_per_iter * bench_config.num_threads; + let p = 10_000 * bench_config.num_threads; // FIXME: adjust 
to sharded workers let mut num_responses = 0usize; let mut dummy: u16 = rng.gen(); diff --git a/aquatic_udp_bench/src/main.rs b/aquatic_udp_bench/src/main.rs index 13c602e..1bc0034 100644 --- a/aquatic_udp_bench/src/main.rs +++ b/aquatic_udp_bench/src/main.rs @@ -59,7 +59,6 @@ pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> { let a = announce::bench_announce_handler( &bench_config, - &aquatic_config, &request_sender, &response_receiver, &mut rng, @@ -68,7 +67,6 @@ pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> { let s = scrape::bench_scrape_handler( &bench_config, - &aquatic_config, &request_sender, &response_receiver, &mut rng, diff --git a/aquatic_udp_bench/src/scrape.rs b/aquatic_udp_bench/src/scrape.rs index 4cef9c0..fc058cb 100644 --- a/aquatic_udp_bench/src/scrape.rs +++ b/aquatic_udp_bench/src/scrape.rs @@ -7,7 +7,6 @@ use rand::Rng; use rand_distr::Pareto; use aquatic_udp::common::*; -use aquatic_udp::config::Config; use aquatic_udp_protocol::*; use crate::common::*; @@ -15,7 +14,6 @@ use crate::config::BenchConfig; pub fn bench_scrape_handler( bench_config: &BenchConfig, - aquatic_config: &Config, request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>, response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>, rng: &mut impl Rng, @@ -28,7 +26,7 @@ pub fn bench_scrape_handler( bench_config.num_hashes_per_scrape_request, ); - let p = aquatic_config.handlers.max_requests_per_iter * bench_config.num_threads; + let p = 10_000 * bench_config.num_threads; // FIXME: adjust to sharded workers let mut num_responses = 0usize; let mut dummy: i32 = rng.gen(); From 2aa94d050b55237e5f6b4f12464f44c74ef545fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Tue, 16 Nov 2021 23:49:36 +0100 Subject: [PATCH 23/56] Update TODO --- TODO.md | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/TODO.md b/TODO.md index 8ae2800..e5acc4b 100644 --- a/TODO.md +++ b/TODO.md @@ 
-17,17 +17,19 @@ * cargo-deny * aquatic_udp - * shard torrent state - * old note that might be useful: - * consider adding ConnectedScrapeRequest::Scrape(PendingScrapeRequest) - containing TransactionId and BTreeMap, and same for - response + * notes + * load testing shows that with sharded state, mio reaches 1.4M responses per second + with 6 socket and 4 request workers. performance is great overall and faster than + without sharding. io_uring impl is slightly behind or slightly ahead of mio, but + nothing justifying code complexity and unsafety + * clean torrent map in workers, remove it from shared state + * consider rewriting load test to just have one worker type. Connection state + should/could be divided by socket worker anyway? + * mio + * stagger connection cleaning intervals? * uring * ValidUntil periodic update * statistics - * shared config keys such as poll interval - * mio - * stagger connection cleaning intervals? * aquatic_http: * clean out connections regularly From 5440157a9518302a19dadfd2e4cec00aef6034f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Wed, 17 Nov 2021 00:51:15 +0100 Subject: [PATCH 24/56] udp load test: use only one type of worker for better performance --- TODO.md | 2 - aquatic_udp_load_test/src/common.rs | 18 +--- aquatic_udp_load_test/src/handler.rs | 124 +-------------------------- aquatic_udp_load_test/src/main.rs | 52 ++--------- aquatic_udp_load_test/src/network.rs | 77 ++++++++++------- 5 files changed, 54 insertions(+), 219 deletions(-) diff --git a/TODO.md b/TODO.md index e5acc4b..6172add 100644 --- a/TODO.md +++ b/TODO.md @@ -23,8 +23,6 @@ without sharding. io_uring impl is slightly behind or slightly ahead of mio, but nothing justifying code complexity and unsafety * clean torrent map in workers, remove it from shared state - * consider rewriting load test to just have one worker type. Connection state - should/could be divided by socket worker anyway?
* mio * stagger connection cleaning intervals? * uring diff --git a/aquatic_udp_load_test/src/common.rs b/aquatic_udp_load_test/src/common.rs index 6339f32..85474fd 100644 --- a/aquatic_udp_load_test/src/common.rs +++ b/aquatic_udp_load_test/src/common.rs @@ -5,7 +5,6 @@ use aquatic_cli_helpers::LogLevel; #[cfg(feature = "cpu-pinning")] use aquatic_common::cpu_pinning::CpuPinningConfig; use hashbrown::HashMap; -use parking_lot::Mutex; use serde::{Deserialize, Serialize}; use aquatic_udp_protocol::*; @@ -22,11 +21,7 @@ pub struct Config { /// address here. pub server_address: SocketAddr, pub log_level: LogLevel, - /// Number of sockets and socket worker threads - pub num_socket_workers: u8, - /// Number of workers generating requests from responses, as well as - /// requests not connected to previous ones. - pub num_request_workers: usize, + pub workers: u8, /// Run duration (quit and generate report after this many seconds) pub duration: usize, pub network: NetworkConfig, @@ -75,8 +70,6 @@ pub struct HandlerConfig { pub number_of_torrents: usize, /// Maximum number of torrents to ask about in scrape requests pub scrape_max_torrents: usize, - /// Handler: max number of responses to collect for before processing - pub max_responses_per_iter: usize, /// Probability that a generated request is a connect request as part /// of sum of the various weight arguments. pub weight_connect: usize, @@ -86,8 +79,6 @@ pub struct HandlerConfig { /// Probability that a generated request is a scrape request, as part /// of sum of the various weight arguments. pub weight_scrape: usize, - /// Handler: max microseconds to wait for single response from channel - pub channel_timeout: u64, /// Pareto shape /// /// Fake peers choose torrents according to Pareto distribution. 
@@ -105,8 +96,7 @@ impl Default for Config { Self { server_address: "127.0.0.1:3000".parse().unwrap(), log_level: LogLevel::Error, - num_socket_workers: 1, - num_request_workers: 1, + workers: 1, duration: 0, network: NetworkConfig::default(), handler: HandlerConfig::default(), @@ -138,8 +128,6 @@ impl Default for HandlerConfig { weight_announce: 1, weight_scrape: 1, additional_request_factor: 0.4, - max_responses_per_iter: 10_000, - channel_timeout: 200, torrent_selection_pareto_shape: 2.0, } } @@ -168,9 +156,9 @@ pub struct Statistics { #[derive(Clone)] pub struct LoadTestState { - pub torrent_peers: Arc>, pub info_hashes: Arc>, pub statistics: Arc, + pub responses: Arc, } #[derive(PartialEq, Eq, Clone, Copy)] diff --git a/aquatic_udp_load_test/src/handler.rs b/aquatic_udp_load_test/src/handler.rs index e690ca4..cc98c27 100644 --- a/aquatic_udp_load_test/src/handler.rs +++ b/aquatic_udp_load_test/src/handler.rs @@ -1,9 +1,5 @@ use std::sync::Arc; -use std::time::Duration; -use std::vec::Drain; -use crossbeam_channel::{Receiver, Sender}; -use parking_lot::MutexGuard; use rand::distributions::WeightedIndex; use rand::prelude::*; use rand_distr::Pareto; @@ -13,125 +9,7 @@ use aquatic_udp_protocol::*; use crate::common::*; use crate::utils::*; -pub fn run_handler_thread( - config: &Config, - state: LoadTestState, - pareto: Pareto, - request_senders: Vec>, - response_receiver: Receiver<(ThreadId, Response)>, -) { - let state = &state; - - let mut rng1 = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()"); - let mut rng2 = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()"); - - let timeout = Duration::from_micros(config.handler.channel_timeout); - - let mut responses = Vec::new(); - - loop { - let mut opt_torrent_peers = None; - - // Collect a maximum number of responses. Stop collecting before that - // number is reached if having waited for too long for a request, but - // only if ConnectionMap mutex isn't locked. 
- for i in 0..config.handler.max_responses_per_iter { - let response = if i == 0 { - match response_receiver.recv() { - Ok(r) => r, - Err(_) => break, // Really shouldn't happen - } - } else { - match response_receiver.recv_timeout(timeout) { - Ok(r) => r, - Err(_) => { - if let Some(guard) = state.torrent_peers.try_lock() { - opt_torrent_peers = Some(guard); - - break; - } else { - continue; - } - } - } - }; - - responses.push(response); - } - - let mut torrent_peers: MutexGuard = - opt_torrent_peers.unwrap_or_else(|| state.torrent_peers.lock()); - - let requests = process_responses( - &mut rng1, - pareto, - &state.info_hashes, - config, - &mut torrent_peers, - responses.drain(..), - ); - - // Somewhat dubious heuristic for deciding how fast to create - // and send additional requests (requests not having anything - // to do with previously sent requests) - let num_additional_to_send = { - let num_additional_requests = requests.iter().map(|v| v.len()).sum::() as f64; - - let num_new_requests_per_socket = - num_additional_requests / config.num_socket_workers as f64; - - ((num_new_requests_per_socket / 1.2) * config.handler.additional_request_factor) - as usize - + 10 - }; - - for (channel_index, new_requests) in requests.into_iter().enumerate() { - let channel = &request_senders[channel_index]; - - for _ in 0..num_additional_to_send { - let request = create_connect_request(generate_transaction_id(&mut rng2)); - - channel - .send(request) - .expect("send request to channel in handler worker"); - } - - for request in new_requests.into_iter() { - channel - .send(request) - .expect("send request to channel in handler worker"); - } - } - } -} - -fn process_responses( - rng: &mut impl Rng, - pareto: Pareto, - info_hashes: &Arc>, - config: &Config, - torrent_peers: &mut TorrentPeerMap, - responses: Drain<(ThreadId, Response)>, -) -> Vec> { - let mut new_requests = Vec::with_capacity(config.num_socket_workers as usize); - - for _ in 0..config.num_socket_workers { - 
new_requests.push(Vec::new()); - } - - for (socket_thread_id, response) in responses.into_iter() { - let opt_request = - process_response(rng, pareto, info_hashes, &config, torrent_peers, response); - - if let Some(new_request) = opt_request { - new_requests[socket_thread_id.0 as usize].push(new_request); - } - } - - new_requests -} - -fn process_response( +pub fn process_response( rng: &mut impl Rng, pareto: Pareto, info_hashes: &Arc>, diff --git a/aquatic_udp_load_test/src/main.rs b/aquatic_udp_load_test/src/main.rs index 0b34bb2..50468c8 100644 --- a/aquatic_udp_load_test/src/main.rs +++ b/aquatic_udp_load_test/src/main.rs @@ -5,10 +5,6 @@ use std::time::{Duration, Instant}; #[cfg(feature = "cpu-pinning")] use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex}; -use crossbeam_channel::unbounded; -use hashbrown::HashMap; -use parking_lot::Mutex; -use rand::prelude::*; use rand_distr::Pareto; mod common; @@ -17,7 +13,6 @@ mod network; mod utils; use common::*; -use handler::run_handler_thread; use network::*; use utils::*; @@ -54,22 +49,17 @@ fn run(config: Config) -> ::anyhow::Result<()> { } let state = LoadTestState { - torrent_peers: Arc::new(Mutex::new(HashMap::new())), info_hashes: Arc::new(info_hashes), statistics: Arc::new(Statistics::default()), + responses: Default::default(), }; let pareto = Pareto::new(1.0, config.handler.torrent_selection_pareto_shape).unwrap(); - // Start socket workers + // Start workers - let (response_sender, response_receiver) = unbounded(); - - let mut request_senders = Vec::new(); - - for i in 0..config.num_socket_workers { + for i in 0..config.workers { let thread_id = ThreadId(i); - let (sender, receiver) = unbounded(); let port = config.network.first_port + (i as u16); let ip = if config.server_address.is_ipv6() { @@ -83,55 +73,25 @@ fn run(config: Config) -> ::anyhow::Result<()> { }; let addr = SocketAddr::new(ip, port); - - request_senders.push(sender); - let config = config.clone(); - let 
response_sender = response_sender.clone(); let state = state.clone(); thread::spawn(move || { #[cfg(feature = "cpu-pinning")] pin_current_if_configured_to( &config.cpu_pinning, - config.num_socket_workers as usize, + config.workers as usize, WorkerIndex::SocketWorker(i as usize), ); - run_socket_thread(state, response_sender, receiver, &config, addr, thread_id) + run_worker_thread(state, pareto, &config, addr, thread_id) }); } - for i in 0..config.num_request_workers { - let config = config.clone(); - let state = state.clone(); - let request_senders = request_senders.clone(); - let response_receiver = response_receiver.clone(); - - thread::spawn(move || { - #[cfg(feature = "cpu-pinning")] - pin_current_if_configured_to( - &config.cpu_pinning, - config.num_socket_workers as usize, - WorkerIndex::RequestWorker(i as usize), - ); - run_handler_thread(&config, state, pareto, request_senders, response_receiver) - }); - } - - // Bootstrap request cycle by adding a request to each request channel - for sender in request_senders.iter() { - let request = create_connect_request(generate_transaction_id(&mut thread_rng())); - - sender - .send(request) - .expect("bootstrap: add initial request to request queue"); - } - #[cfg(feature = "cpu-pinning")] pin_current_if_configured_to( &config.cpu_pinning, - config.num_socket_workers as usize, + config.workers as usize, WorkerIndex::Other, ); diff --git a/aquatic_udp_load_test/src/network.rs b/aquatic_udp_load_test/src/network.rs index 358a27b..79fc3e5 100644 --- a/aquatic_udp_load_test/src/network.rs +++ b/aquatic_udp_load_test/src/network.rs @@ -1,15 +1,16 @@ -use std::io::Cursor; +use std::{io::Cursor, vec::Drain}; use std::net::SocketAddr; use std::sync::atomic::Ordering; use std::time::Duration; -use crossbeam_channel::{Receiver, Sender}; use mio::{net::UdpSocket, Events, Interest, Poll, Token}; +use rand::{SeedableRng, prelude::SmallRng, thread_rng}; +use rand_distr::Pareto; use socket2::{Domain, Protocol, Socket, Type}; use 
aquatic_udp_protocol::*; -use crate::common::*; +use crate::{common::*, handler::{process_response}, utils::*}; const MAX_PACKET_SIZE: usize = 4096; @@ -45,10 +46,9 @@ pub fn create_socket(config: &Config, addr: SocketAddr) -> ::std::net::UdpSocket socket.into() } -pub fn run_socket_thread( +pub fn run_worker_thread( state: LoadTestState, - response_channel_sender: Sender<(ThreadId, Response)>, - request_receiver: Receiver, + pareto: Pareto, config: &Config, addr: SocketAddr, thread_id: ThreadId, @@ -56,6 +56,9 @@ pub fn run_socket_thread( let mut socket = UdpSocket::from_std(create_socket(config, addr)); let mut buffer = [0u8; MAX_PACKET_SIZE]; + let mut rng = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()"); + let mut torrent_peers = TorrentPeerMap::default(); + let token = Token(thread_id.0 as usize); let interests = Interest::READABLE; let timeout = Duration::from_micros(config.network.poll_timeout); @@ -70,6 +73,10 @@ pub fn run_socket_thread( let mut local_state = SocketWorkerLocalStatistics::default(); let mut responses = Vec::new(); + let mut requests = Vec::new(); + + // Bootstrap request cycle by adding a request + requests.push(create_connect_request(generate_transaction_id(&mut thread_rng()))); loop { poll.poll(&mut events, Some(timeout)) @@ -78,52 +85,56 @@ pub fn run_socket_thread( for event in events.iter() { if (event.token() == token) & event.is_readable() { read_responses( - thread_id, &socket, &mut buffer, &mut local_state, &mut responses, ); - - for r in responses.drain(..) 
{ - response_channel_sender.send(r).unwrap_or_else(|err| { - panic!( - "add response to channel in socket worker {}: {:?}", - thread_id.0, err - ) - }); - } - - poll.registry() - .reregister(&mut socket, token, interests) - .unwrap(); } + } - send_requests( - &state, - &mut socket, - &mut buffer, - &request_receiver, - &mut local_state, - ); + let total_responses = responses.len() + if thread_id.0 == 0 { + state.responses.fetch_and(0, Ordering::SeqCst) + } else { + state.responses.fetch_add(responses.len(), Ordering::SeqCst) + }; + + // Somewhat dubious heuristic for deciding how fast to create + // and send additional requests + let num_additional_to_send = { + let n = total_responses as f64 / (config.workers as f64 * 4.0); + + (n * config.handler.additional_request_factor) as usize + 10 + }; + + for _ in 0..num_additional_to_send { + requests.push(create_connect_request(generate_transaction_id(&mut rng))); + } + + for response in responses.drain(..) { + let opt_request = + process_response(&mut rng, pareto, &state.info_hashes, &config, &mut torrent_peers, response); + + if let Some(new_request) = opt_request { + requests.push(new_request); + } } send_requests( &state, &mut socket, &mut buffer, - &request_receiver, &mut local_state, + requests.drain(..), ); } } fn read_responses( - thread_id: ThreadId, socket: &UdpSocket, buffer: &mut [u8], ls: &mut SocketWorkerLocalStatistics, - responses: &mut Vec<(ThreadId, Response)>, + responses: &mut Vec, ) { while let Ok(amt) = socket.recv(buffer) { match Response::from_bytes(&buffer[0..amt]) { @@ -148,7 +159,7 @@ fn read_responses( } } - responses.push((thread_id, response)) + responses.push(response) } Err(err) => { eprintln!("Received invalid response: {:#?}", err); @@ -161,12 +172,12 @@ fn send_requests( state: &LoadTestState, socket: &mut UdpSocket, buffer: &mut [u8], - receiver: &Receiver, statistics: &mut SocketWorkerLocalStatistics, + requests: Drain, ) { let mut cursor = Cursor::new(buffer); - while let Ok(request) 
= receiver.try_recv() { + for request in requests { cursor.set_position(0); if let Err(err) = request.write(&mut cursor) { From c5bf3901eaec234bcd289fceec6c03c3ce4844de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Wed, 17 Nov 2021 00:52:19 +0100 Subject: [PATCH 25/56] run cargo fmt --- aquatic_udp/src/lib/network_uring.rs | 3 +-- aquatic_udp_load_test/src/network.rs | 38 +++++++++++++++------------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/aquatic_udp/src/lib/network_uring.rs b/aquatic_udp/src/lib/network_uring.rs index 22263b3..72c0e28 100644 --- a/aquatic_udp/src/lib/network_uring.rs +++ b/aquatic_udp/src/lib/network_uring.rs @@ -316,8 +316,7 @@ pub fn run_socket_worker( let num_to_queue = (space_in_send_queue).min(local_responses.len()); let drain_from_index = local_responses.len() - num_to_queue; - for (response, addr) in local_responses.drain(drain_from_index..) - { + for (response, addr) in local_responses.drain(drain_from_index..) 
{ queue_response( &config, &mut sq, diff --git a/aquatic_udp_load_test/src/network.rs b/aquatic_udp_load_test/src/network.rs index 79fc3e5..8b816e7 100644 --- a/aquatic_udp_load_test/src/network.rs +++ b/aquatic_udp_load_test/src/network.rs @@ -1,16 +1,16 @@ -use std::{io::Cursor, vec::Drain}; use std::net::SocketAddr; use std::sync::atomic::Ordering; use std::time::Duration; +use std::{io::Cursor, vec::Drain}; use mio::{net::UdpSocket, Events, Interest, Poll, Token}; -use rand::{SeedableRng, prelude::SmallRng, thread_rng}; +use rand::{prelude::SmallRng, thread_rng, SeedableRng}; use rand_distr::Pareto; use socket2::{Domain, Protocol, Socket, Type}; use aquatic_udp_protocol::*; -use crate::{common::*, handler::{process_response}, utils::*}; +use crate::{common::*, handler::process_response, utils::*}; const MAX_PACKET_SIZE: usize = 4096; @@ -76,7 +76,9 @@ pub fn run_worker_thread( let mut requests = Vec::new(); // Bootstrap request cycle by adding a request - requests.push(create_connect_request(generate_transaction_id(&mut thread_rng()))); + requests.push(create_connect_request(generate_transaction_id( + &mut thread_rng(), + ))); loop { poll.poll(&mut events, Some(timeout)) @@ -84,20 +86,16 @@ pub fn run_worker_thread( for event in events.iter() { if (event.token() == token) & event.is_readable() { - read_responses( - &socket, - &mut buffer, - &mut local_state, - &mut responses, - ); + read_responses(&socket, &mut buffer, &mut local_state, &mut responses); } } - let total_responses = responses.len() + if thread_id.0 == 0 { - state.responses.fetch_and(0, Ordering::SeqCst) - } else { - state.responses.fetch_add(responses.len(), Ordering::SeqCst) - }; + let total_responses = responses.len() + + if thread_id.0 == 0 { + state.responses.fetch_and(0, Ordering::SeqCst) + } else { + state.responses.fetch_add(responses.len(), Ordering::SeqCst) + }; // Somewhat dubious heuristic for deciding how fast to create // and send additional requests @@ -112,8 +110,14 @@ pub fn 
run_worker_thread( } for response in responses.drain(..) { - let opt_request = - process_response(&mut rng, pareto, &state.info_hashes, &config, &mut torrent_peers, response); + let opt_request = process_response( + &mut rng, + pareto, + &state.info_hashes, + &config, + &mut torrent_peers, + response, + ); if let Some(new_request) = opt_request { requests.push(new_request); From eb511c3a4cb4736766a44d5762f7d3eede1d930b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Wed, 17 Nov 2021 00:58:08 +0100 Subject: [PATCH 26/56] udp load test: rewrite network loop, default to weight_announce=5 --- aquatic_udp_load_test/src/common.rs | 8 +- aquatic_udp_load_test/src/main.rs | 1 - aquatic_udp_load_test/src/network.rs | 167 +++++++++++---------------- 3 files changed, 66 insertions(+), 110 deletions(-) diff --git a/aquatic_udp_load_test/src/common.rs b/aquatic_udp_load_test/src/common.rs index 85474fd..fe81667 100644 --- a/aquatic_udp_load_test/src/common.rs +++ b/aquatic_udp_load_test/src/common.rs @@ -85,10 +85,6 @@ pub struct HandlerConfig { pub torrent_selection_pareto_shape: f64, /// Probability that a generated peer is a seeder pub peer_seeder_probability: f64, - /// Part of additional request creation calculation, meaning requests - /// which are not dependent on previous responses from server. Higher - /// means more. 
- pub additional_request_factor: f64, } impl Default for Config { @@ -125,9 +121,8 @@ impl Default for HandlerConfig { peer_seeder_probability: 0.25, scrape_max_torrents: 50, weight_connect: 0, - weight_announce: 1, + weight_announce: 5, weight_scrape: 1, - additional_request_factor: 0.4, torrent_selection_pareto_shape: 2.0, } } @@ -158,7 +153,6 @@ pub struct Statistics { pub struct LoadTestState { pub info_hashes: Arc>, pub statistics: Arc, - pub responses: Arc, } #[derive(PartialEq, Eq, Clone, Copy)] diff --git a/aquatic_udp_load_test/src/main.rs b/aquatic_udp_load_test/src/main.rs index 50468c8..9bd4dc4 100644 --- a/aquatic_udp_load_test/src/main.rs +++ b/aquatic_udp_load_test/src/main.rs @@ -51,7 +51,6 @@ fn run(config: Config) -> ::anyhow::Result<()> { let state = LoadTestState { info_hashes: Arc::new(info_hashes), statistics: Arc::new(Statistics::default()), - responses: Default::default(), }; let pareto = Pareto::new(1.0, config.handler.torrent_selection_pareto_shape).unwrap(); diff --git a/aquatic_udp_load_test/src/network.rs b/aquatic_udp_load_test/src/network.rs index 8b816e7..a74e28c 100644 --- a/aquatic_udp_load_test/src/network.rs +++ b/aquatic_udp_load_test/src/network.rs @@ -1,7 +1,7 @@ +use std::io::Cursor; use std::net::SocketAddr; use std::sync::atomic::Ordering; use std::time::Duration; -use std::{io::Cursor, vec::Drain}; use mio::{net::UdpSocket, Events, Interest, Poll, Token}; use rand::{prelude::SmallRng, thread_rng, SeedableRng}; @@ -71,14 +71,11 @@ pub fn run_worker_thread( let mut events = Events::with_capacity(config.network.poll_event_capacity); - let mut local_state = SocketWorkerLocalStatistics::default(); - let mut responses = Vec::new(); - let mut requests = Vec::new(); + let mut statistics = SocketWorkerLocalStatistics::default(); - // Bootstrap request cycle by adding a request - requests.push(create_connect_request(generate_transaction_id( - &mut thread_rng(), - ))); + // Bootstrap request cycle + let initial_request = 
create_connect_request(generate_transaction_id(&mut thread_rng())); + send_request(&mut socket, &mut buffer, &mut statistics, initial_request); loop { poll.poll(&mut events, Some(timeout)) @@ -86,121 +83,87 @@ pub fn run_worker_thread( for event in events.iter() { if (event.token() == token) & event.is_readable() { - read_responses(&socket, &mut buffer, &mut local_state, &mut responses); - } - } + while let Ok(amt) = socket.recv(&mut buffer) { + match Response::from_bytes(&buffer[0..amt]) { + Ok(response) => { + match response { + Response::AnnounceIpv4(ref r) => { + statistics.responses_announce += 1; + statistics.response_peers += r.peers.len(); + } + Response::AnnounceIpv6(ref r) => { + statistics.responses_announce += 1; + statistics.response_peers += r.peers.len(); + } + Response::Scrape(_) => { + statistics.responses_scrape += 1; + } + Response::Connect(_) => { + statistics.responses_connect += 1; + } + Response::Error(_) => { + statistics.responses_error += 1; + } + } - let total_responses = responses.len() - + if thread_id.0 == 0 { - state.responses.fetch_and(0, Ordering::SeqCst) - } else { - state.responses.fetch_add(responses.len(), Ordering::SeqCst) - }; + let opt_request = process_response( + &mut rng, + pareto, + &state.info_hashes, + &config, + &mut torrent_peers, + response, + ); - // Somewhat dubious heuristic for deciding how fast to create - // and send additional requests - let num_additional_to_send = { - let n = total_responses as f64 / (config.workers as f64 * 4.0); - - (n * config.handler.additional_request_factor) as usize + 10 - }; - - for _ in 0..num_additional_to_send { - requests.push(create_connect_request(generate_transaction_id(&mut rng))); - } - - for response in responses.drain(..) 
{ - let opt_request = process_response( - &mut rng, - pareto, - &state.info_hashes, - &config, - &mut torrent_peers, - response, - ); - - if let Some(new_request) = opt_request { - requests.push(new_request); - } - } - - send_requests( - &state, - &mut socket, - &mut buffer, - &mut local_state, - requests.drain(..), - ); - } -} - -fn read_responses( - socket: &UdpSocket, - buffer: &mut [u8], - ls: &mut SocketWorkerLocalStatistics, - responses: &mut Vec, -) { - while let Ok(amt) = socket.recv(buffer) { - match Response::from_bytes(&buffer[0..amt]) { - Ok(response) => { - match response { - Response::AnnounceIpv4(ref r) => { - ls.responses_announce += 1; - ls.response_peers += r.peers.len(); - } - Response::AnnounceIpv6(ref r) => { - ls.responses_announce += 1; - ls.response_peers += r.peers.len(); - } - Response::Scrape(_) => { - ls.responses_scrape += 1; - } - Response::Connect(_) => { - ls.responses_connect += 1; - } - Response::Error(_) => { - ls.responses_error += 1; + if let Some(request) = opt_request { + send_request(&mut socket, &mut buffer, &mut statistics, request); + } + } + Err(err) => { + eprintln!("Received invalid response: {:#?}", err); + } } } - responses.push(response) - } - Err(err) => { - eprintln!("Received invalid response: {:#?}", err); + let additional_request = create_connect_request(generate_transaction_id(&mut rng)); + + send_request(&mut socket, &mut buffer, &mut statistics, additional_request); + + update_shared_statistics(&state, &mut statistics); } } } } -fn send_requests( - state: &LoadTestState, +fn send_request( socket: &mut UdpSocket, buffer: &mut [u8], statistics: &mut SocketWorkerLocalStatistics, - requests: Drain, + request: Request, ) { let mut cursor = Cursor::new(buffer); - for request in requests { - cursor.set_position(0); + match request.write(&mut cursor) { + Ok(()) => { + let position = cursor.position() as usize; + let inner = cursor.get_ref(); - if let Err(err) = request.write(&mut cursor) { + match 
socket.send(&inner[..position]) { + Ok(_) => { + statistics.requests += 1; + } + Err(err) => { + eprintln!("Couldn't send packet: {:?}", err); + } + } + } + Err(err) => { eprintln!("request_to_bytes err: {}", err); } - - let position = cursor.position() as usize; - let inner = cursor.get_ref(); - - match socket.send(&inner[..position]) { - Ok(_) => { - statistics.requests += 1; - } - Err(err) => { - eprintln!("Couldn't send packet: {:?}", err); - } - } } +} +fn update_shared_statistics(state: &LoadTestState, statistics: &mut SocketWorkerLocalStatistics) { state .statistics .requests From fc6f3c299e761708e1dc40c22c32f77bdee87af6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Wed, 17 Nov 2021 01:59:31 +0100 Subject: [PATCH 27/56] udp load test: move config to own file, clean up imports --- Cargo.lock | 2 - aquatic_udp_load_test/Cargo.toml | 2 - aquatic_udp_load_test/src/common.rs | 121 -------------------------- aquatic_udp_load_test/src/config.rs | 124 +++++++++++++++++++++++++++ aquatic_udp_load_test/src/handler.rs | 1 + aquatic_udp_load_test/src/main.rs | 2 + aquatic_udp_load_test/src/network.rs | 1 + aquatic_udp_load_test/src/utils.rs | 1 + 8 files changed, 129 insertions(+), 125 deletions(-) create mode 100644 aquatic_udp_load_test/src/config.rs diff --git a/Cargo.lock b/Cargo.lock index 62ea631..50b5ece 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -224,11 +224,9 @@ dependencies = [ "aquatic_cli_helpers", "aquatic_common", "aquatic_udp_protocol", - "crossbeam-channel", "hashbrown 0.11.2", "mimalloc", "mio", - "parking_lot", "quickcheck", "quickcheck_macros", "rand", diff --git a/aquatic_udp_load_test/Cargo.toml b/aquatic_udp_load_test/Cargo.toml index d77977b..52d39fa 100644 --- a/aquatic_udp_load_test/Cargo.toml +++ b/aquatic_udp_load_test/Cargo.toml @@ -17,11 +17,9 @@ anyhow = "1" aquatic_cli_helpers = "0.1.0" aquatic_common = "0.1.0" aquatic_udp_protocol = "0.1.0" -crossbeam-channel = "0.5" hashbrown = "0.11.2" mimalloc = { version = 
"0.1", default-features = false } mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] } -parking_lot = "0.11" rand = { version = "0.8", features = ["small_rng"] } rand_distr = "0.4" serde = { version = "1", features = ["derive"] } diff --git a/aquatic_udp_load_test/src/common.rs b/aquatic_udp_load_test/src/common.rs index fe81667..276c324 100644 --- a/aquatic_udp_load_test/src/common.rs +++ b/aquatic_udp_load_test/src/common.rs @@ -1,133 +1,12 @@ -use std::net::SocketAddr; use std::sync::{atomic::AtomicUsize, Arc}; -use aquatic_cli_helpers::LogLevel; -#[cfg(feature = "cpu-pinning")] -use aquatic_common::cpu_pinning::CpuPinningConfig; use hashbrown::HashMap; -use serde::{Deserialize, Serialize}; use aquatic_udp_protocol::*; #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] pub struct ThreadId(pub u8); -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(default)] -pub struct Config { - /// Server address - /// - /// If you want to send IPv4 requests to a IPv4+IPv6 tracker, put an IPv4 - /// address here. - pub server_address: SocketAddr, - pub log_level: LogLevel, - pub workers: u8, - /// Run duration (quit and generate report after this many seconds) - pub duration: usize, - pub network: NetworkConfig, - pub handler: HandlerConfig, - #[cfg(feature = "cpu-pinning")] - pub cpu_pinning: CpuPinningConfig, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(default)] -pub struct NetworkConfig { - /// True means bind to one localhost IP per socket. - /// - /// The point of multiple IPs is to cause a better distribution - /// of requests to servers with SO_REUSEPORT option. - /// - /// Setting this to true can cause issues on macOS. - pub multiple_client_ipv4s: bool, - /// Number of first client port - pub first_port: u16, - /// Socket worker poll timeout in microseconds - pub poll_timeout: u64, - /// Socket worker polling event number - pub poll_event_capacity: usize, - /// Size of socket recv buffer. Use 0 for OS default. 
- /// - /// This setting can have a big impact on dropped packages. It might - /// require changing system defaults. Some examples of commands to set - /// recommended values for different operating systems: - /// - /// macOS: - /// $ sudo sysctl net.inet.udp.recvspace=6000000 - /// $ sudo sysctl net.inet.udp.maxdgram=500000 # Not necessary, but recommended - /// $ sudo sysctl kern.ipc.maxsockbuf=8388608 # Not necessary, but recommended - /// - /// Linux: - /// $ sudo sysctl -w net.core.rmem_max=104857600 - /// $ sudo sysctl -w net.core.rmem_default=104857600 - pub recv_buffer: usize, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -#[serde(default)] -pub struct HandlerConfig { - /// Number of torrents to simulate - pub number_of_torrents: usize, - /// Maximum number of torrents to ask about in scrape requests - pub scrape_max_torrents: usize, - /// Probability that a generated request is a connect request as part - /// of sum of the various weight arguments. - pub weight_connect: usize, - /// Probability that a generated request is a announce request, as part - /// of sum of the various weight arguments. - pub weight_announce: usize, - /// Probability that a generated request is a scrape request, as part - /// of sum of the various weight arguments. - pub weight_scrape: usize, - /// Pareto shape - /// - /// Fake peers choose torrents according to Pareto distribution. 
- pub torrent_selection_pareto_shape: f64, - /// Probability that a generated peer is a seeder - pub peer_seeder_probability: f64, -} - -impl Default for Config { - fn default() -> Self { - Self { - server_address: "127.0.0.1:3000".parse().unwrap(), - log_level: LogLevel::Error, - workers: 1, - duration: 0, - network: NetworkConfig::default(), - handler: HandlerConfig::default(), - #[cfg(feature = "cpu-pinning")] - cpu_pinning: CpuPinningConfig::default_for_load_test(), - } - } -} - -impl Default for NetworkConfig { - fn default() -> Self { - Self { - multiple_client_ipv4s: true, - first_port: 45_000, - poll_timeout: 276, - poll_event_capacity: 2_877, - recv_buffer: 6_000_000, - } - } -} - -impl Default for HandlerConfig { - fn default() -> Self { - Self { - number_of_torrents: 10_000, - peer_seeder_probability: 0.25, - scrape_max_torrents: 50, - weight_connect: 0, - weight_announce: 5, - weight_scrape: 1, - torrent_selection_pareto_shape: 2.0, - } - } -} - #[derive(PartialEq, Eq, Clone)] pub struct TorrentPeer { pub info_hash: InfoHash, diff --git a/aquatic_udp_load_test/src/config.rs b/aquatic_udp_load_test/src/config.rs new file mode 100644 index 0000000..458adc4 --- /dev/null +++ b/aquatic_udp_load_test/src/config.rs @@ -0,0 +1,124 @@ +use std::net::SocketAddr; + +use serde::{Deserialize, Serialize}; + +use aquatic_cli_helpers::LogLevel; +#[cfg(feature = "cpu-pinning")] +use aquatic_common::cpu_pinning::CpuPinningConfig; + + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(default)] +pub struct Config { + /// Server address + /// + /// If you want to send IPv4 requests to a IPv4+IPv6 tracker, put an IPv4 + /// address here. 
+ pub server_address: SocketAddr, + pub log_level: LogLevel, + pub workers: u8, + /// Run duration (quit and generate report after this many seconds) + pub duration: usize, + pub network: NetworkConfig, + pub handler: HandlerConfig, + #[cfg(feature = "cpu-pinning")] + pub cpu_pinning: CpuPinningConfig, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(default)] +pub struct NetworkConfig { + /// True means bind to one localhost IP per socket. + /// + /// The point of multiple IPs is to cause a better distribution + /// of requests to servers with SO_REUSEPORT option. + /// + /// Setting this to true can cause issues on macOS. + pub multiple_client_ipv4s: bool, + /// Number of first client port + pub first_port: u16, + /// Socket worker poll timeout in microseconds + pub poll_timeout: u64, + /// Socket worker polling event number + pub poll_event_capacity: usize, + /// Size of socket recv buffer. Use 0 for OS default. + /// + /// This setting can have a big impact on dropped packages. It might + /// require changing system defaults. Some examples of commands to set + /// recommended values for different operating systems: + /// + /// macOS: + /// $ sudo sysctl net.inet.udp.recvspace=6000000 + /// $ sudo sysctl net.inet.udp.maxdgram=500000 # Not necessary, but recommended + /// $ sudo sysctl kern.ipc.maxsockbuf=8388608 # Not necessary, but recommended + /// + /// Linux: + /// $ sudo sysctl -w net.core.rmem_max=104857600 + /// $ sudo sysctl -w net.core.rmem_default=104857600 + pub recv_buffer: usize, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[serde(default)] +pub struct HandlerConfig { + /// Number of torrents to simulate + pub number_of_torrents: usize, + /// Maximum number of torrents to ask about in scrape requests + pub scrape_max_torrents: usize, + /// Probability that a generated request is a connect request as part + /// of sum of the various weight arguments. 
+ pub weight_connect: usize, + /// Probability that a generated request is a announce request, as part + /// of sum of the various weight arguments. + pub weight_announce: usize, + /// Probability that a generated request is a scrape request, as part + /// of sum of the various weight arguments. + pub weight_scrape: usize, + /// Pareto shape + /// + /// Fake peers choose torrents according to Pareto distribution. + pub torrent_selection_pareto_shape: f64, + /// Probability that a generated peer is a seeder + pub peer_seeder_probability: f64, +} + +impl Default for Config { + fn default() -> Self { + Self { + server_address: "127.0.0.1:3000".parse().unwrap(), + log_level: LogLevel::Error, + workers: 1, + duration: 0, + network: NetworkConfig::default(), + handler: HandlerConfig::default(), + #[cfg(feature = "cpu-pinning")] + cpu_pinning: CpuPinningConfig::default_for_load_test(), + } + } +} + +impl Default for NetworkConfig { + fn default() -> Self { + Self { + multiple_client_ipv4s: true, + first_port: 45_000, + poll_timeout: 276, + poll_event_capacity: 2_877, + recv_buffer: 6_000_000, + } + } +} + +impl Default for HandlerConfig { + fn default() -> Self { + Self { + number_of_torrents: 10_000, + peer_seeder_probability: 0.25, + scrape_max_torrents: 50, + weight_connect: 0, + weight_announce: 5, + weight_scrape: 1, + torrent_selection_pareto_shape: 2.0, + } + } +} \ No newline at end of file diff --git a/aquatic_udp_load_test/src/handler.rs b/aquatic_udp_load_test/src/handler.rs index cc98c27..fe0a188 100644 --- a/aquatic_udp_load_test/src/handler.rs +++ b/aquatic_udp_load_test/src/handler.rs @@ -6,6 +6,7 @@ use rand_distr::Pareto; use aquatic_udp_protocol::*; +use crate::config::Config; use crate::common::*; use crate::utils::*; diff --git a/aquatic_udp_load_test/src/main.rs b/aquatic_udp_load_test/src/main.rs index 9bd4dc4..3a7651c 100644 --- a/aquatic_udp_load_test/src/main.rs +++ b/aquatic_udp_load_test/src/main.rs @@ -8,11 +8,13 @@ use 
aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex}; use rand_distr::Pareto; mod common; +mod config; mod handler; mod network; mod utils; use common::*; +use config::Config; use network::*; use utils::*; diff --git a/aquatic_udp_load_test/src/network.rs b/aquatic_udp_load_test/src/network.rs index a74e28c..e89df1f 100644 --- a/aquatic_udp_load_test/src/network.rs +++ b/aquatic_udp_load_test/src/network.rs @@ -10,6 +10,7 @@ use socket2::{Domain, Protocol, Socket, Type}; use aquatic_udp_protocol::*; +use crate::config::Config; use crate::{common::*, handler::process_response, utils::*}; const MAX_PACKET_SIZE: usize = 4096; diff --git a/aquatic_udp_load_test/src/utils.rs b/aquatic_udp_load_test/src/utils.rs index b2ee9c8..f9ee26e 100644 --- a/aquatic_udp_load_test/src/utils.rs +++ b/aquatic_udp_load_test/src/utils.rs @@ -5,6 +5,7 @@ use rand_distr::Pareto; use aquatic_udp_protocol::*; +use crate::config::Config; use crate::common::*; pub fn create_torrent_peer( From 71a093dcec0cd9c9db5ccff4e2856ee25990b299 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Wed, 17 Nov 2021 01:59:45 +0100 Subject: [PATCH 28/56] udp load test: run cargo fmt --- aquatic_udp_load_test/src/config.rs | 3 +-- aquatic_udp_load_test/src/handler.rs | 2 +- aquatic_udp_load_test/src/network.rs | 7 ++++++- aquatic_udp_load_test/src/utils.rs | 2 +- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/aquatic_udp_load_test/src/config.rs b/aquatic_udp_load_test/src/config.rs index 458adc4..cbd1176 100644 --- a/aquatic_udp_load_test/src/config.rs +++ b/aquatic_udp_load_test/src/config.rs @@ -6,7 +6,6 @@ use aquatic_cli_helpers::LogLevel; #[cfg(feature = "cpu-pinning")] use aquatic_common::cpu_pinning::CpuPinningConfig; - #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct Config { @@ -121,4 +120,4 @@ impl Default for HandlerConfig { torrent_selection_pareto_shape: 2.0, } } -} \ No newline at end of file +} diff --git 
a/aquatic_udp_load_test/src/handler.rs b/aquatic_udp_load_test/src/handler.rs index fe0a188..2611341 100644 --- a/aquatic_udp_load_test/src/handler.rs +++ b/aquatic_udp_load_test/src/handler.rs @@ -6,8 +6,8 @@ use rand_distr::Pareto; use aquatic_udp_protocol::*; -use crate::config::Config; use crate::common::*; +use crate::config::Config; use crate::utils::*; pub fn process_response( diff --git a/aquatic_udp_load_test/src/network.rs b/aquatic_udp_load_test/src/network.rs index e89df1f..f6b08dc 100644 --- a/aquatic_udp_load_test/src/network.rs +++ b/aquatic_udp_load_test/src/network.rs @@ -128,7 +128,12 @@ pub fn run_worker_thread( let additional_request = create_connect_request(generate_transaction_id(&mut rng)); - send_request(&mut socket, &mut buffer, &mut statistics, additional_request); + send_request( + &mut socket, + &mut buffer, + &mut statistics, + additional_request, + ); update_shared_statistics(&state, &mut statistics); } diff --git a/aquatic_udp_load_test/src/utils.rs b/aquatic_udp_load_test/src/utils.rs index f9ee26e..f88c211 100644 --- a/aquatic_udp_load_test/src/utils.rs +++ b/aquatic_udp_load_test/src/utils.rs @@ -5,8 +5,8 @@ use rand_distr::Pareto; use aquatic_udp_protocol::*; -use crate::config::Config; use crate::common::*; +use crate::config::Config; pub fn create_torrent_peer( config: &Config, From b5643aa7abeacd994f25a133708f67131c10feeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Wed, 17 Nov 2021 02:34:39 +0100 Subject: [PATCH 29/56] Update TODO --- TODO.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TODO.md b/TODO.md index 6172add..b64834d 100644 --- a/TODO.md +++ b/TODO.md @@ -20,8 +20,8 @@ * notes * load testing shows that with sharded state, mio reaches 1.4M responses per second with 6 socket and 4 request workers. performance is great overall and faster than - without sharding. 
io_uring impl is slightly behind or slighly ahead of mio, but - nothing justifying code complexity and unsafety + without sharding. io_uring impl is a lot behind mio impl with new load tester + * what poll event capacity is actually needed? * clean torrent map in workers, remove it from shared state * mio * stagger connection cleaning intervals? From 54149ed3eb8440fa15b6cbafb2f14e3e0e53141a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Thu, 18 Nov 2021 21:56:48 +0100 Subject: [PATCH 30/56] aquatic_common: add work-in-progress SO_ATTACH_REUSEPORT_CBPF impl --- Cargo.lock | 1 + aquatic_common/Cargo.toml | 5 +- aquatic_common/src/cpu_pinning.rs | 103 ++++++++++++++++++++++++++++++ 3 files changed, 107 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 50b5ece..2e2b2f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,6 +93,7 @@ dependencies = [ "hashbrown 0.11.2", "hex", "indexmap-amortized", + "libc", "log", "privdrop", "rand", diff --git a/aquatic_common/Cargo.toml b/aquatic_common/Cargo.toml index e0a4992..b076f77 100644 --- a/aquatic_common/Cargo.toml +++ b/aquatic_common/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/greatest-ape/aquatic" name = "aquatic_common" [features] -cpu-pinning = ["affinity"] +cpu-pinning = ["affinity", "libc"] [dependencies] ahash = "0.7" @@ -25,4 +25,5 @@ privdrop = "0.5" rand = { version = "0.8", features = ["small_rng"] } serde = { version = "1", features = ["derive"] } -affinity = { version = "0.1", optional = true } \ No newline at end of file +affinity = { version = "0.1", optional = true } +libc = { version = "0.2", optional = true } \ No newline at end of file diff --git a/aquatic_common/src/cpu_pinning.rs b/aquatic_common/src/cpu_pinning.rs index ddfa833..43f506b 100644 --- a/aquatic_common/src/cpu_pinning.rs +++ b/aquatic_common/src/cpu_pinning.rs @@ -104,3 +104,106 @@ pub fn pin_current_if_configured_to( } } } + +/// Tell Linux that incoming messages should be 
handled by the socket worker +/// with the same index as the CPU core receiving the interrupt. +/// +/// Requires that sockets are actually bound in order, so waiting has to be done +/// in socket workers. +/// +/// It might make sense to first enable RSS or RPS (if hardware doesn't support +/// RSS) and enable sending interrupts to all CPUs that have socket workers +/// running on them. Possibly, CPU 0 should be excluded. +/// +/// More Information: +/// - https://talawah.io/blog/extreme-http-performance-tuning-one-point-two-million/ +/// - https://www.kernel.org/doc/Documentation/networking/scaling.txt +/// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/performance_tuning_guide/network-rps +#[cfg(target_os = "linux")] +pub fn socket_attach_cbpf( + socket: &S, + _num_sockets: usize, +) -> ::std::io::Result<()> { + use std::mem::size_of; + use std::os::raw::c_void; + + use libc::{setsockopt, sock_filter, sock_fprog, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF}; + + // Good BPF documentation: https://man.openbsd.org/bpf.4 + + // Values of constants were copied from the following Linux source files: + // - include/uapi/linux/bpf_common.h + // - include/uapi/linux/filter.h + + // Instruction + const BPF_LD: u16 = 0x00; // Load into A + // const BPF_LDX: u16 = 0x01; // Load into X + // const BPF_ALU: u16 = 0x04; // Load into X + const BPF_RET: u16 = 0x06; // Return value + // const BPF_MOD: u16 = 0x90; // Run modulo on A + + // Size + const BPF_W: u16 = 0x00; // 32-bit width + + // Source + // const BPF_IMM: u16 = 0x00; // Use constant (k) + const BPF_ABS: u16 = 0x20; + + // Registers + // const BPF_K: u16 = 0x00; + const BPF_A: u16 = 0x10; + + // k + const SKF_AD_OFF: i32 = -0x1000; // Activate extensions + const SKF_AD_CPU: i32 = 36; // Extension for getting CPU + + // Return index of socket that should receive packet + let mut filter = [ + // Store index of CPU receiving packet in register A + sock_filter { + code: BPF_LD | BPF_W | BPF_ABS, 
+ jt: 0, + jf: 0, + k: u32::from_ne_bytes((SKF_AD_OFF + SKF_AD_CPU).to_ne_bytes()), + }, + /* Disabled, because it doesn't make a lot of sense + // Run A = A % socket_workers + sock_filter { + code: BPF_ALU | BPF_MOD, + jt: 0, + jf: 0, + k: num_sockets as u32, + }, + */ + // Return A + sock_filter { + code: BPF_RET | BPF_A, + jt: 0, + jf: 0, + k: 0, + }, + ]; + + let program = sock_fprog { + filter: filter.as_mut_ptr(), + len: filter.len() as u16, + }; + + let program_ptr: *const sock_fprog = &program; + + unsafe { + let result = setsockopt( + socket.as_raw_fd(), + SOL_SOCKET, + SO_ATTACH_REUSEPORT_CBPF, + program_ptr as *const c_void, + size_of::() as u32, + ); + + if result != 0 { + Err(::std::io::Error::last_os_error()) + } else { + Ok(()) + } + } +} From 99632d4be59daef6c325e12bc1aa66c84b122c40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Thu, 18 Nov 2021 22:13:07 +0100 Subject: [PATCH 31/56] udp: implement torrent map cleaning for new, sharded torrent state --- Cargo.lock | 1 - TODO.md | 1 - aquatic_udp/Cargo.toml | 1 - aquatic_udp/src/lib/common/mod.rs | 3 -- aquatic_udp/src/lib/handlers.rs | 24 +++++++++++-- aquatic_udp/src/lib/lib.rs | 51 ++++++---------------------- aquatic_udp/src/lib/tasks.rs | 56 ------------------------------- aquatic_udp_bench/src/main.rs | 8 +++-- 8 files changed, 38 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e2b2f4..9325c50 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -184,7 +184,6 @@ dependencies = [ "cfg-if", "crossbeam-channel", "hex", - "histogram", "io-uring", "libc", "log", diff --git a/TODO.md b/TODO.md index b64834d..488b937 100644 --- a/TODO.md +++ b/TODO.md @@ -22,7 +22,6 @@ with 6 socket and 4 request workers. performance is great overall and faster than without sharding. io_uring impl is a lot behind mio impl with new load tester * what poll event capacity is actually needed? 
- * clean torrent map in workers, remove it from shared state * mio * stagger connection cleaning intervals? * uring diff --git a/aquatic_udp/Cargo.toml b/aquatic_udp/Cargo.toml index f2756e4..8e9ecb9 100644 --- a/aquatic_udp/Cargo.toml +++ b/aquatic_udp/Cargo.toml @@ -28,7 +28,6 @@ aquatic_udp_protocol = "0.1.0" cfg-if = "1" crossbeam-channel = "0.5" hex = "0.4" -histogram = "0.6" log = "0.4" mimalloc = { version = "0.1", default-features = false } parking_lot = "0.11" diff --git a/aquatic_udp/src/lib/common/mod.rs b/aquatic_udp/src/lib/common/mod.rs index 72f9862..be382b9 100644 --- a/aquatic_udp/src/lib/common/mod.rs +++ b/aquatic_udp/src/lib/common/mod.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use std::time::Instant; use crossbeam_channel::Sender; -use parking_lot::Mutex; use socket2::{Domain, Protocol, Socket, Type}; use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap}; @@ -304,7 +303,6 @@ pub struct Statistics { #[derive(Clone)] pub struct State { pub access_list: Arc, - pub torrents: Arc>, pub statistics: Arc, } @@ -312,7 +310,6 @@ impl Default for State { fn default() -> Self { Self { access_list: Arc::new(AccessListArcSwap::default()), - torrents: Arc::new(Mutex::new(TorrentMaps::default())), statistics: Arc::new(Statistics::default()), } } diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index 1f478c4..dcf2d28 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use std::net::IpAddr; use std::net::SocketAddr; use std::time::Duration; +use std::time::Instant; use aquatic_common::ValidUntil; use crossbeam_channel::Receiver; @@ -16,6 +17,7 @@ use crate::config::Config; pub fn run_request_worker( config: Config, + state: State, request_receiver: Receiver<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>, response_sender: ConnectedResponseSender, ) { @@ -23,11 +25,15 @@ pub fn run_request_worker( let mut small_rng = 
SmallRng::from_entropy(); let timeout = Duration::from_millis(config.handlers.channel_recv_timeout_ms); + let mut peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age); + + let cleaning_interval = Duration::from_secs(config.cleaning.torrent_cleaning_interval); + + let mut iter_counter = 0usize; + let mut last_cleaning = Instant::now(); loop { if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) { - let peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age); - let response = match request { ConnectedRequest::Announce(request) => handle_announce_request( &config, @@ -45,7 +51,19 @@ pub fn run_request_worker( response_sender.try_send_to(sender_index, response, src); } - // TODO: clean torrent map, update peer_valid_until + if iter_counter % 128 == 0 { + peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age); + + let now = Instant::now(); + + if now > last_cleaning + cleaning_interval { + torrents.clean(&config, &state.access_list); + + last_cleaning = now; + } + } + + iter_counter = iter_counter.wrapping_add(1); } } diff --git a/aquatic_udp/src/lib/lib.rs b/aquatic_udp/src/lib/lib.rs index e321e95..014e0ca 100644 --- a/aquatic_udp/src/lib/lib.rs +++ b/aquatic_udp/src/lib/lib.rs @@ -35,33 +35,6 @@ pub fn run(config: Config) -> ::anyhow::Result<()> { let mut signals = Signals::new(::std::iter::once(SIGUSR1))?; - { - let config = config.clone(); - let state = state.clone(); - - ::std::thread::spawn(move || run_inner(config, state)); - } - - #[cfg(feature = "cpu-pinning")] - pin_current_if_configured_to( - &config.cpu_pinning, - config.socket_workers, - WorkerIndex::Other, - ); - - for signal in &mut signals { - match signal { - SIGUSR1 => { - let _ = update_access_list(&config.access_list, &state.access_list); - } - _ => unreachable!(), - } - } - - Ok(()) -} - -pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { let num_bound_sockets = Arc::new(AtomicUsize::new(0)); let mut request_senders = 
Vec::new(); @@ -86,6 +59,7 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { for i in 0..config.request_workers { let config = config.clone(); + let state = state.clone(); let request_receiver = request_receivers.remove(&i).unwrap().clone(); let response_sender = ConnectedResponseSender::new(response_senders.clone()); @@ -99,7 +73,7 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { WorkerIndex::RequestWorker(i), ); - handlers::run_request_worker(config, request_receiver, response_sender) + handlers::run_request_worker(config, state, request_receiver, response_sender) }) .with_context(|| "spawn request worker")?; } @@ -146,12 +120,6 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { .with_context(|| "spawn socket worker")?; } - ::std::mem::drop(request_senders); - ::std::mem::drop(request_receivers); - - ::std::mem::drop(response_senders); - ::std::mem::drop(response_receivers); - if config.statistics.interval != 0 { let state = state.clone(); let config = config.clone(); @@ -189,11 +157,14 @@ pub fn run_inner(config: Config, state: State) -> ::anyhow::Result<()> { WorkerIndex::Other, ); - loop { - ::std::thread::sleep(Duration::from_secs( - config.cleaning.torrent_cleaning_interval, - )); - - state.torrents.lock().clean(&config, &state.access_list); + for signal in &mut signals { + match signal { + SIGUSR1 => { + let _ = update_access_list(&config.access_list, &state.access_list); + } + _ => unreachable!(), + } } + + Ok(()) } diff --git a/aquatic_udp/src/lib/tasks.rs b/aquatic_udp/src/lib/tasks.rs index c4bcac3..8e22560 100644 --- a/aquatic_udp/src/lib/tasks.rs +++ b/aquatic_udp/src/lib/tasks.rs @@ -1,7 +1,5 @@ use std::sync::atomic::Ordering; -use histogram::Histogram; - use super::common::*; use crate::config::Config; @@ -38,59 +36,5 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { bytes_sent_per_second * 8.0 / 1_000_000.0, ); - let mut total_num_torrents_ipv4 = 
0usize; - let mut total_num_torrents_ipv6 = 0usize; - let mut total_num_peers_ipv4 = 0usize; - let mut total_num_peers_ipv6 = 0usize; - - let mut peers_per_torrent = Histogram::new(); - - { - let torrents = &mut state.torrents.lock(); - - for torrent in torrents.ipv4.values() { - let num_peers = torrent.num_seeders + torrent.num_leechers; - - if let Err(err) = peers_per_torrent.increment(num_peers as u64) { - ::log::error!("error incrementing peers_per_torrent histogram: {}", err) - } - - total_num_peers_ipv4 += num_peers; - } - for torrent in torrents.ipv6.values() { - let num_peers = torrent.num_seeders + torrent.num_leechers; - - if let Err(err) = peers_per_torrent.increment(num_peers as u64) { - ::log::error!("error incrementing peers_per_torrent histogram: {}", err) - } - - total_num_peers_ipv6 += num_peers; - } - - total_num_torrents_ipv4 += torrents.ipv4.len(); - total_num_torrents_ipv6 += torrents.ipv6.len(); - } - - println!( - "ipv4 torrents: {}, peers: {}; ipv6 torrents: {}, peers: {}", - total_num_torrents_ipv4, - total_num_peers_ipv4, - total_num_torrents_ipv6, - total_num_peers_ipv6, - ); - - if peers_per_torrent.entries() != 0 { - println!( - "peers per torrent: min: {}, p50: {}, p75: {}, p90: {}, p99: {}, p999: {}, max: {}", - peers_per_torrent.minimum().unwrap(), - peers_per_torrent.percentile(50.0).unwrap(), - peers_per_torrent.percentile(75.0).unwrap(), - peers_per_torrent.percentile(90.0).unwrap(), - peers_per_torrent.percentile(99.0).unwrap(), - peers_per_torrent.percentile(99.9).unwrap(), - peers_per_torrent.maximum().unwrap(), - ); - } - println!(); } diff --git a/aquatic_udp_bench/src/main.rs b/aquatic_udp_bench/src/main.rs index 1bc0034..6c77bee 100644 --- a/aquatic_udp_bench/src/main.rs +++ b/aquatic_udp_bench/src/main.rs @@ -39,7 +39,9 @@ fn main() { pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> { // Setup common state, spawn request handlers - let aquatic_config = Config::default(); + let mut aquatic_config = 
Config::default(); + + aquatic_config.cleaning.torrent_cleaning_interval = 60 * 60 * 24; let (request_sender, request_receiver) = unbounded(); let (response_sender, response_receiver) = unbounded(); @@ -49,7 +51,9 @@ pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> { { let config = aquatic_config.clone(); - ::std::thread::spawn(move || run_request_worker(config, request_receiver, response_sender)); + ::std::thread::spawn(move || { + run_request_worker(config, State::default(), request_receiver, response_sender) + }); } // Run benchmarks From 138ae710efa3c87c3047e546f4efcbfde0e36b6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Thu, 18 Nov 2021 22:18:45 +0100 Subject: [PATCH 32/56] udp: remove io_uring version, it is slower than mio version --- Cargo.lock | 19 - aquatic_udp/Cargo.toml | 12 +- aquatic_udp/src/lib/config.rs | 2 - aquatic_udp/src/lib/lib.rs | 31 +- .../src/lib/{network_mio.rs => network.rs} | 0 aquatic_udp/src/lib/network_uring.rs | 483 ------------------ scripts/run-aquatic-udp.sh | 14 +- 7 files changed, 10 insertions(+), 551 deletions(-) rename aquatic_udp/src/lib/{network_mio.rs => network.rs} (100%) delete mode 100644 aquatic_udp/src/lib/network_uring.rs diff --git a/Cargo.lock b/Cargo.lock index 9325c50..c2e45b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -180,12 +180,9 @@ dependencies = [ "aquatic_cli_helpers", "aquatic_common", "aquatic_udp_protocol", - "bytemuck", "cfg-if", "crossbeam-channel", "hex", - "io-uring", - "libc", "log", "mimalloc", "mio", @@ -447,12 +444,6 @@ version = "3.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9df67f7bf9ef8498769f994239c45613ef0c5899415fb58e9add412d2c1a538" -[[package]] -name = "bytemuck" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b" - [[package]] name = "byteorder" version = "1.4.3" @@ -1137,16 +1128,6 @@ 
dependencies = [ "memoffset 0.5.6", ] -[[package]] -name = "io-uring" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d75829ed9377bab6c90039fe47b9d84caceb4b5063266142e21bcce6550cda8" -dependencies = [ - "bitflags", - "libc", -] - [[package]] name = "itertools" version = "0.10.1" diff --git a/aquatic_udp/Cargo.toml b/aquatic_udp/Cargo.toml index 8e9ecb9..ef91567 100644 --- a/aquatic_udp/Cargo.toml +++ b/aquatic_udp/Cargo.toml @@ -15,10 +15,7 @@ path = "src/lib/lib.rs" name = "aquatic_udp" [features] -default = ["with-mio"] cpu-pinning = ["aquatic_common/cpu-pinning"] -with-mio = ["mio"] -with-io-uring = ["io-uring", "libc", "bytemuck"] [dependencies] anyhow = "1" @@ -30,6 +27,7 @@ crossbeam-channel = "0.5" hex = "0.4" log = "0.4" mimalloc = { version = "0.1", default-features = false } +mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] } parking_lot = "0.11" rand = { version = "0.8", features = ["small_rng"] } serde = { version = "1", features = ["derive"] } @@ -37,14 +35,6 @@ slab = "0.4" signal-hook = { version = "0.3" } socket2 = { version = "0.4.1", features = ["all"] } -# mio -mio = { version = "0.7", features = ["udp", "os-poll", "os-util"], optional = true } - -# io-uring -io-uring = { version = "0.5", optional = true } -libc = { version = "0.2", optional = true } -bytemuck = { version = "1", optional = true } - [dev-dependencies] quickcheck = "1.0" quickcheck_macros = "1.0" diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index e9b3108..f26ba4e 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -54,7 +54,6 @@ pub struct NetworkConfig { /// $ sudo sysctl -w net.core.rmem_max=104857600 /// $ sudo sysctl -w net.core.rmem_default=104857600 pub socket_recv_buffer_size: usize, - #[cfg(feature = "with-mio")] pub poll_event_capacity: usize, } @@ -120,7 +119,6 @@ impl Default for NetworkConfig { address: SocketAddr::from(([0, 0, 0, 0], 3000)), 
only_ipv6: false, socket_recv_buffer_size: 4096 * 128, - #[cfg(feature = "with-mio")] poll_event_capacity: 4096, } } diff --git a/aquatic_udp/src/lib/lib.rs b/aquatic_udp/src/lib/lib.rs index 014e0ca..f42c377 100644 --- a/aquatic_udp/src/lib/lib.rs +++ b/aquatic_udp/src/lib/lib.rs @@ -1,10 +1,7 @@ pub mod common; pub mod config; pub mod handlers; -#[cfg(feature = "with-mio")] -pub mod network_mio; -#[cfg(feature = "with-io-uring")] -pub mod network_uring; +pub mod network; pub mod tasks; use config::Config; @@ -96,25 +93,13 @@ pub fn run(config: Config) -> ::anyhow::Result<()> { WorkerIndex::SocketWorker(i), ); - cfg_if::cfg_if!( - if #[cfg(feature = "with-io-uring")] { - network_uring::run_socket_worker( - state, - config, - request_sender, - response_receiver, - num_bound_sockets, - ); - } else { - network_mio::run_socket_worker( - state, - config, - i, - request_sender, - response_receiver, - num_bound_sockets, - ); - } + network::run_socket_worker( + state, + config, + i, + request_sender, + response_receiver, + num_bound_sockets, ); }) .with_context(|| "spawn socket worker")?; diff --git a/aquatic_udp/src/lib/network_mio.rs b/aquatic_udp/src/lib/network.rs similarity index 100% rename from aquatic_udp/src/lib/network_mio.rs rename to aquatic_udp/src/lib/network.rs diff --git a/aquatic_udp/src/lib/network_uring.rs b/aquatic_udp/src/lib/network_uring.rs deleted file mode 100644 index 72c0e28..0000000 --- a/aquatic_udp/src/lib/network_uring.rs +++ /dev/null @@ -1,483 +0,0 @@ -use std::io::Cursor; -use std::mem::size_of; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4}; -use std::os::unix::prelude::AsRawFd; -use std::ptr::null_mut; -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, -}; -use std::time::{Duration, Instant}; - -use aquatic_common::access_list::create_access_list_cache; -use aquatic_common::ValidUntil; -use crossbeam_channel::Receiver; -use io_uring::types::{Fixed, Timespec}; -use io_uring::SubmissionQueue; -use libc::{ - c_void, 
in6_addr, in_addr, iovec, msghdr, sockaddr_in, sockaddr_in6, AF_INET, AF_INET6, -}; -use rand::prelude::{SeedableRng, StdRng}; -use slab::Slab; - -use aquatic_udp_protocol::{Request, Response}; - -use crate::common::network::ConnectionMap; -use crate::common::network::*; -use crate::common::*; -use crate::config::Config; - -const RING_SIZE: usize = 128; -const MAX_RECV_EVENTS: usize = 1; -const MAX_SEND_EVENTS: usize = RING_SIZE - MAX_RECV_EVENTS - 1; -const NUM_BUFFERS: usize = MAX_RECV_EVENTS + MAX_SEND_EVENTS; - -#[derive(Clone, Copy, Debug, PartialEq)] -enum UserData { - RecvMsg { slab_key: usize }, - SendMsg { slab_key: usize }, - Timeout, -} - -impl UserData { - fn get_buffer_index(&self) -> usize { - match self { - Self::RecvMsg { slab_key } => *slab_key, - Self::SendMsg { slab_key } => slab_key + MAX_RECV_EVENTS, - Self::Timeout => { - unreachable!() - } - } - } -} - -impl From for UserData { - fn from(mut n: u64) -> UserData { - let bytes = bytemuck::bytes_of_mut(&mut n); - - let t = bytes[7]; - - bytes[7] = 0; - - match t { - 0 => Self::RecvMsg { - slab_key: n as usize, - }, - 1 => Self::SendMsg { - slab_key: n as usize, - }, - 2 => Self::Timeout, - _ => unreachable!(), - } - } -} - -impl Into for UserData { - fn into(self) -> u64 { - match self { - Self::RecvMsg { slab_key } => { - let mut out = slab_key as u64; - - bytemuck::bytes_of_mut(&mut out)[7] = 0; - - out - } - Self::SendMsg { slab_key } => { - let mut out = slab_key as u64; - - bytemuck::bytes_of_mut(&mut out)[7] = 1; - - out - } - Self::Timeout => { - let mut out = 0u64; - - bytemuck::bytes_of_mut(&mut out)[7] = 2; - - out - } - } - } -} - -pub fn run_socket_worker( - state: State, - config: Config, - request_sender: ConnectedRequestSender, - response_receiver: Receiver<(ConnectedResponse, SocketAddr)>, - num_bound_sockets: Arc, -) { - let mut rng = StdRng::from_entropy(); - - let socket = create_socket(&config); - - num_bound_sockets.fetch_add(1, Ordering::SeqCst); - - let mut connections = 
ConnectionMap::default(); - let mut pending_scrape_responses = PendingScrapeResponseMap::default(); - let mut access_list_cache = create_access_list_cache(&state.access_list); - let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new(); - - let mut buffers: Vec<[u8; MAX_PACKET_SIZE]> = - (0..NUM_BUFFERS).map(|_| [0; MAX_PACKET_SIZE]).collect(); - - let mut sockaddrs_ipv4 = [sockaddr_in { - sin_addr: in_addr { s_addr: 0 }, - sin_port: 0, - sin_family: AF_INET as u16, - sin_zero: Default::default(), - }; NUM_BUFFERS]; - - let mut sockaddrs_ipv6 = [sockaddr_in6 { - sin6_addr: in6_addr { s6_addr: [0; 16] }, - sin6_port: 0, - sin6_family: AF_INET6 as u16, - sin6_flowinfo: 0, - sin6_scope_id: 0, - }; NUM_BUFFERS]; - - let mut iovs: Vec = (0..NUM_BUFFERS) - .map(|i| { - let iov_base = buffers[i].as_mut_ptr() as *mut c_void; - let iov_len = MAX_PACKET_SIZE; - - iovec { iov_base, iov_len } - }) - .collect(); - - let mut msghdrs: Vec = (0..NUM_BUFFERS) - .map(|i| { - let msg_iov: *mut iovec = &mut iovs[i]; - - let mut msghdr = msghdr { - msg_name: null_mut(), - msg_namelen: 0, - msg_iov, - msg_iovlen: 1, - msg_control: null_mut(), - msg_controllen: 0, - msg_flags: 0, - }; - - if config.network.address.is_ipv4() { - let ptr: *mut sockaddr_in = &mut sockaddrs_ipv4[i]; - - msghdr.msg_name = ptr as *mut c_void; - msghdr.msg_namelen = size_of::() as u32; - } else { - let ptr: *mut sockaddr_in6 = &mut sockaddrs_ipv6[i]; - - msghdr.msg_name = ptr as *mut c_void; - msghdr.msg_namelen = size_of::() as u32; - } - - msghdr - }) - .collect(); - - let timeout = Timespec::new().nsec(100_000_000); - - let mut force_send_responses = false; - let mut timeout_queued = false; - - let mut recv_entries = Slab::with_capacity(MAX_RECV_EVENTS); - let mut send_entries = Slab::with_capacity(MAX_SEND_EVENTS); - - let mut ring = io_uring::IoUring::new(RING_SIZE as u32).unwrap(); - - let (submitter, mut sq, mut cq) = ring.split(); - - submitter.register_files(&[socket.as_raw_fd()]).unwrap(); - - 
let fd = Fixed(0); - - let cleaning_duration = Duration::from_secs(config.cleaning.connection_cleaning_interval); - - let mut iter_counter = 0usize; - let mut last_cleaning = Instant::now(); - - loop { - while let Some(entry) = cq.next() { - let user_data: UserData = entry.user_data().into(); - - match user_data { - UserData::RecvMsg { slab_key } => { - recv_entries.remove(slab_key); - - let result = entry.result(); - - if result < 0 { - ::log::info!( - "recvmsg error {}: {:#}", - result, - ::std::io::Error::from_raw_os_error(-result) - ); - } else if result == 0 { - ::log::info!("recvmsg error: 0 bytes read"); - } else { - let buffer_index = user_data.get_buffer_index(); - let buffer_len = result as usize; - - let src = if config.network.address.is_ipv4() { - SocketAddr::V4(SocketAddrV4::new( - Ipv4Addr::from(u32::from_be( - sockaddrs_ipv4[buffer_index].sin_addr.s_addr, - )), - u16::from_be(sockaddrs_ipv4[buffer_index].sin_port), - )) - } else { - let mut octets = sockaddrs_ipv6[buffer_index].sin6_addr.s6_addr; - let port = u16::from_be(sockaddrs_ipv6[buffer_index].sin6_port); - - for byte in octets.iter_mut() { - *byte = u8::from_be(*byte); - } - - let ip = match octets { - // Convert IPv4-mapped address (available in std but nightly-only) - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => { - Ipv4Addr::new(a, b, c, d).into() - } - octets => Ipv6Addr::from(octets).into(), - }; - - SocketAddr::new(ip, port) - }; - - let res_request = Request::from_bytes( - &buffers[buffer_index][..buffer_len], - config.protocol.max_scrape_torrents, - ); - - // FIXME: don't run every iteration - let valid_until = ValidUntil::new(config.cleaning.max_connection_age); - - handle_request( - &config, - &mut connections, - &mut pending_scrape_responses, - &mut access_list_cache, - &mut rng, - &request_sender, - &mut local_responses, - valid_until, - res_request, - src, - ); - } - } - UserData::SendMsg { slab_key } => { - send_entries.remove(slab_key); - - if entry.result() < 0 { 
- ::log::error!( - "sendmsg error: {:#}", - ::std::io::Error::from_raw_os_error(-entry.result()) - ); - } - } - UserData::Timeout => { - force_send_responses = true; - timeout_queued = false; - } - } - } - - for _ in 0..(MAX_RECV_EVENTS - recv_entries.len()) { - let slab_key = recv_entries.insert(()); - let user_data = UserData::RecvMsg { slab_key }; - - let msghdr_ptr: *mut msghdr = &mut msghdrs[user_data.get_buffer_index()]; - - let entry = io_uring::opcode::RecvMsg::new(fd, msghdr_ptr) - .build() - .user_data(user_data.into()); - - unsafe { - sq.push(&entry).unwrap(); - } - } - - for (response, addr) in response_receiver.try_iter() { - let opt_response = match response { - ConnectedResponse::Scrape(r) => pending_scrape_responses.add_and_get_finished(r), - ConnectedResponse::AnnounceIpv4(r) => Some(Response::AnnounceIpv4(r)), - ConnectedResponse::AnnounceIpv6(r) => Some(Response::AnnounceIpv6(r)), - }; - - if let Some(response) = opt_response { - local_responses.push((response, addr)); - } - } - - let space_in_send_queue = MAX_SEND_EVENTS - send_entries.len(); - - if force_send_responses | (local_responses.len() >= space_in_send_queue) { - let num_to_queue = (space_in_send_queue).min(local_responses.len()); - let drain_from_index = local_responses.len() - num_to_queue; - - for (response, addr) in local_responses.drain(drain_from_index..) 
{ - queue_response( - &config, - &mut sq, - fd, - &mut send_entries, - &mut buffers, - &mut iovs, - &mut sockaddrs_ipv4, - &mut sockaddrs_ipv6, - &mut msghdrs, - response, - addr, - ); - } - - if local_responses.is_empty() { - force_send_responses = false; - } - } - - if !timeout_queued & !force_send_responses { - // Setup timer to occasionally force sending of responses - let user_data = UserData::Timeout; - - let timespec_ptr: *const Timespec = &timeout; - - let entry = io_uring::opcode::Timeout::new(timespec_ptr) - .build() - .user_data(user_data.into()); - - unsafe { - sq.push(&entry).unwrap(); - } - - timeout_queued = true; - } - - if iter_counter % 32 == 0 { - let now = Instant::now(); - - if now > last_cleaning + cleaning_duration { - connections.clean(); - - last_cleaning = now; - } - } - - let wait_for_num = if force_send_responses { - send_entries.len() - } else { - send_entries.len() + recv_entries.len() - }; - - sq.sync(); - - submitter.submit_and_wait(wait_for_num).unwrap(); - - sq.sync(); - cq.sync(); - - iter_counter = iter_counter.wrapping_add(1); - } -} - -fn queue_response( - config: &Config, - sq: &mut SubmissionQueue, - fd: Fixed, - send_entries: &mut Slab<()>, - buffers: &mut [[u8; MAX_PACKET_SIZE]], - iovs: &mut [iovec], - sockaddrs_ipv4: &mut [sockaddr_in], - sockaddrs_ipv6: &mut [sockaddr_in6], - msghdrs: &mut [msghdr], - response: Response, - addr: SocketAddr, -) { - let slab_key = send_entries.insert(()); - let user_data = UserData::SendMsg { slab_key }; - - let buffer_index = user_data.get_buffer_index(); - - let mut cursor = Cursor::new(&mut buffers[buffer_index][..]); - - match response.write(&mut cursor) { - Ok(()) => { - iovs[buffer_index].iov_len = cursor.position() as usize; - - if config.network.address.is_ipv4() { - let addr = if let SocketAddr::V4(addr) = addr { - addr - } else { - unreachable!(); - }; - - sockaddrs_ipv4[buffer_index].sin_addr.s_addr = u32::to_be((*addr.ip()).into()); - sockaddrs_ipv4[buffer_index].sin_port = 
u16::to_be(addr.port()); - } else { - let mut octets = match addr { - SocketAddr::V4(addr) => addr.ip().to_ipv6_mapped().octets(), - SocketAddr::V6(addr) => addr.ip().octets(), - }; - - for byte in octets.iter_mut() { - *byte = byte.to_be(); - } - - sockaddrs_ipv6[buffer_index].sin6_addr.s6_addr = octets; - sockaddrs_ipv6[buffer_index].sin6_port = u16::to_be(addr.port()); - } - } - Err(err) => { - ::log::error!("Response::write error: {:?}", err); - - send_entries.remove(slab_key); - - return; - } - } - - let msghdr_ptr: *mut msghdr = &mut msghdrs[buffer_index]; - - let entry = io_uring::opcode::SendMsg::new(fd, msghdr_ptr) - .build() - .user_data(user_data.into()); - - unsafe { - sq.push(&entry).unwrap(); - } -} - -#[cfg(test)] -mod tests { - use quickcheck::Arbitrary; - use quickcheck_macros::quickcheck; - - use super::*; - - impl quickcheck::Arbitrary for UserData { - fn arbitrary(g: &mut quickcheck::Gen) -> Self { - match (bool::arbitrary(g), bool::arbitrary(g)) { - (false, b) => { - let slab_key: u32 = Arbitrary::arbitrary(g); - let slab_key = slab_key as usize; - - if b { - UserData::RecvMsg { slab_key } - } else { - UserData::SendMsg { slab_key } - } - } - _ => UserData::Timeout, - } - } - } - - #[quickcheck] - fn test_user_data_identity(a: UserData) -> bool { - let n: u64 = a.into(); - let b = UserData::from(n); - - a == b - } -} diff --git a/scripts/run-aquatic-udp.sh b/scripts/run-aquatic-udp.sh index fe99a35..256322f 100755 --- a/scripts/run-aquatic-udp.sh +++ b/scripts/run-aquatic-udp.sh @@ -2,16 +2,4 @@ . 
./scripts/env-native-cpu-without-avx-512 -USAGE="Usage: $0 [mio|io-uring] [ARGS]" - -if [ "$1" != "mio" ] && [ "$1" != "glommio" ] && [ "$1" != "io-uring" ]; then - echo "$USAGE" -else - if [ "$1" = "mio" ]; then - cargo run --release --bin aquatic_udp -- "${@:2}" - elif [ "$1" = "io-uring" ]; then - cargo run --release --features "with-io-uring" --no-default-features --bin aquatic_udp -- "${@:2}" - else - echo "$USAGE" - fi -fi +cargo run --release --bin aquatic_udp -- $@ From 8dd9177c40fc1345f4b5e716f6d2d6f30cdcc74c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Thu, 18 Nov 2021 22:29:25 +0100 Subject: [PATCH 33/56] udp: clean up common file, move some code to other files --- .../src/lib/{common/mod.rs => common.rs} | 67 +---- aquatic_udp/src/lib/common/network.rs | 235 ----------------- aquatic_udp/src/lib/handlers.rs | 74 +++++- aquatic_udp/src/lib/network.rs | 239 +++++++++++++++++- 4 files changed, 304 insertions(+), 311 deletions(-) rename aquatic_udp/src/lib/{common/mod.rs => common.rs} (80%) delete mode 100644 aquatic_udp/src/lib/common/network.rs diff --git a/aquatic_udp/src/lib/common/mod.rs b/aquatic_udp/src/lib/common.rs similarity index 80% rename from aquatic_udp/src/lib/common/mod.rs rename to aquatic_udp/src/lib/common.rs index be382b9..e9fc24e 100644 --- a/aquatic_udp/src/lib/common/mod.rs +++ b/aquatic_udp/src/lib/common.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use std::time::Instant; use crossbeam_channel::Sender; -use socket2::{Domain, Protocol, Socket, Type}; use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap}; use aquatic_common::AHashIndexMap; @@ -15,8 +14,6 @@ use aquatic_udp_protocol::*; use crate::config::Config; -pub mod network; - pub const MAX_PACKET_SIZE: usize = 8192; pub trait Ip: Hash + PartialEq + Eq + Clone + Copy { @@ -60,58 +57,6 @@ pub enum ConnectedResponse { Scrape(PendingScrapeResponse), } -#[derive(Clone, PartialEq, Debug)] -pub struct ProtocolResponsePeer { - pub 
ip_address: I, - pub port: Port, -} - -pub struct ProtocolAnnounceResponse { - pub transaction_id: TransactionId, - pub announce_interval: AnnounceInterval, - pub leechers: NumberOfPeers, - pub seeders: NumberOfPeers, - pub peers: Vec>, -} - -impl Into for ProtocolAnnounceResponse { - fn into(self) -> ConnectedResponse { - ConnectedResponse::AnnounceIpv4(AnnounceResponseIpv4 { - transaction_id: self.transaction_id, - announce_interval: self.announce_interval, - leechers: self.leechers, - seeders: self.seeders, - peers: self - .peers - .into_iter() - .map(|peer| ResponsePeerIpv4 { - ip_address: peer.ip_address, - port: peer.port, - }) - .collect(), - }) - } -} - -impl Into for ProtocolAnnounceResponse { - fn into(self) -> ConnectedResponse { - ConnectedResponse::AnnounceIpv6(AnnounceResponseIpv6 { - transaction_id: self.transaction_id, - announce_interval: self.announce_interval, - leechers: self.leechers, - seeders: self.seeders, - peers: self - .peers - .into_iter() - .map(|peer| ResponsePeerIpv6 { - ip_address: peer.ip_address, - port: peer.port, - }) - .collect(), - }) - } -} - #[derive(Clone, Copy, Debug)] pub struct SocketWorkerIndex(pub usize); @@ -119,7 +64,7 @@ pub struct SocketWorkerIndex(pub usize); pub struct RequestWorkerIndex(pub usize); impl RequestWorkerIndex { - fn from_info_hash(config: &Config, info_hash: InfoHash) -> Self { + pub fn from_info_hash(config: &Config, info_hash: InfoHash) -> Self { Self(info_hash.0[0] as usize % config.request_workers) } } @@ -201,16 +146,6 @@ pub struct Peer { pub valid_until: ValidUntil, } -impl Peer { - #[inline(always)] - pub fn to_response_peer(&self) -> ProtocolResponsePeer { - ProtocolResponsePeer { - ip_address: self.ip_address, - port: self.port, - } - } -} - pub type PeerMap = AHashIndexMap>; pub struct TorrentData { diff --git a/aquatic_udp/src/lib/common/network.rs b/aquatic_udp/src/lib/common/network.rs deleted file mode 100644 index ee3ca4f..0000000 --- a/aquatic_udp/src/lib/common/network.rs +++ 
/dev/null @@ -1,235 +0,0 @@ -use std::{net::SocketAddr, time::Instant}; - -use aquatic_common::access_list::AccessListCache; -use aquatic_common::AHashIndexMap; -use aquatic_common::ValidUntil; -use aquatic_udp_protocol::*; -use rand::{prelude::StdRng, Rng}; - -use crate::common::*; - -#[derive(Default)] -pub struct ConnectionMap(AHashIndexMap<(ConnectionId, SocketAddr), ValidUntil>); - -impl ConnectionMap { - pub fn insert( - &mut self, - connection_id: ConnectionId, - socket_addr: SocketAddr, - valid_until: ValidUntil, - ) { - self.0.insert((connection_id, socket_addr), valid_until); - } - - pub fn contains(&self, connection_id: ConnectionId, socket_addr: SocketAddr) -> bool { - self.0.contains_key(&(connection_id, socket_addr)) - } - - pub fn clean(&mut self) { - let now = Instant::now(); - - self.0.retain(|_, v| v.0 > now); - self.0.shrink_to_fit(); - } -} - -pub struct PendingScrapeResponseMeta { - num_pending: usize, - valid_until: ValidUntil, -} - -#[derive(Default)] -pub struct PendingScrapeResponseMap( - AHashIndexMap, -); - -impl PendingScrapeResponseMap { - pub fn prepare( - &mut self, - transaction_id: TransactionId, - num_pending: usize, - valid_until: ValidUntil, - ) { - let meta = PendingScrapeResponseMeta { - num_pending, - valid_until, - }; - let response = PendingScrapeResponse { - transaction_id, - torrent_stats: BTreeMap::new(), - }; - - self.0.insert(transaction_id, (meta, response)); - } - - pub fn add_and_get_finished(&mut self, response: PendingScrapeResponse) -> Option { - let finished = if let Some(r) = self.0.get_mut(&response.transaction_id) { - r.0.num_pending -= 1; - - r.1.torrent_stats.extend(response.torrent_stats.into_iter()); - - r.0.num_pending == 0 - } else { - ::log::warn!("PendingScrapeResponses.add didn't find PendingScrapeResponse in map"); - - false - }; - - if finished { - let response = self.0.remove(&response.transaction_id).unwrap().1; - - Some(Response::Scrape(ScrapeResponse { - transaction_id: response.transaction_id, 
- torrent_stats: response.torrent_stats.into_values().collect(), - })) - } else { - None - } - } - - pub fn clean(&mut self) { - let now = Instant::now(); - - self.0.retain(|_, v| v.0.valid_until.0 > now); - self.0.shrink_to_fit(); - } -} - -pub fn handle_request( - config: &Config, - connections: &mut ConnectionMap, - pending_scrape_responses: &mut PendingScrapeResponseMap, - access_list_cache: &mut AccessListCache, - rng: &mut StdRng, - request_sender: &ConnectedRequestSender, - local_responses: &mut Vec<(Response, SocketAddr)>, - valid_until: ValidUntil, - res_request: Result, - src: SocketAddr, -) { - let access_list_mode = config.access_list.mode; - - match res_request { - Ok(Request::Connect(request)) => { - let connection_id = ConnectionId(rng.gen()); - - connections.insert(connection_id, src, valid_until); - - let response = Response::Connect(ConnectResponse { - connection_id, - transaction_id: request.transaction_id, - }); - - local_responses.push((response, src)) - } - Ok(Request::Announce(request)) => { - if connections.contains(request.connection_id, src) { - if access_list_cache - .load() - .allows(access_list_mode, &request.info_hash.0) - { - let worker_index = - RequestWorkerIndex::from_info_hash(config, request.info_hash); - - request_sender.try_send_to( - worker_index, - ConnectedRequest::Announce(request), - src, - ); - } else { - let response = Response::Error(ErrorResponse { - transaction_id: request.transaction_id, - message: "Info hash not allowed".into(), - }); - - local_responses.push((response, src)) - } - } - } - Ok(Request::Scrape(request)) => { - if connections.contains(request.connection_id, src) { - let mut requests: AHashIndexMap = - Default::default(); - - let transaction_id = request.transaction_id; - - for (i, info_hash) in request.info_hashes.into_iter().enumerate() { - let pending = requests - .entry(RequestWorkerIndex::from_info_hash(&config, info_hash)) - .or_insert_with(|| PendingScrapeRequest { - transaction_id, - 
info_hashes: BTreeMap::new(), - }); - - pending.info_hashes.insert(i, info_hash); - } - - pending_scrape_responses.prepare(transaction_id, requests.len(), valid_until); - - for (request_worker_index, request) in requests { - request_sender.try_send_to( - request_worker_index, - ConnectedRequest::Scrape(request), - src, - ); - } - } - } - Err(err) => { - ::log::debug!("Request::from_bytes error: {:?}", err); - - if let RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } = err - { - if connections.contains(connection_id, src) { - let response = ErrorResponse { - transaction_id, - message: err.right_or("Parse error").into(), - }; - - local_responses.push((response.into(), src)); - } - } - } - } -} - -pub fn create_socket(config: &Config) -> ::std::net::UdpSocket { - let socket = if config.network.address.is_ipv4() { - Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) - } else { - Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) - } - .expect("create socket"); - - if config.network.only_ipv6 { - socket.set_only_v6(true).expect("socket: set only ipv6"); - } - - socket.set_reuse_port(true).expect("socket: set reuse port"); - - socket - .set_nonblocking(true) - .expect("socket: set nonblocking"); - - socket - .bind(&config.network.address.into()) - .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); - - let recv_buffer_size = config.network.socket_recv_buffer_size; - - if recv_buffer_size != 0 { - if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { - ::log::error!( - "socket: failed setting recv buffer to {}: {:?}", - recv_buffer_size, - err - ); - } - } - - socket.into() -} diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index dcf2d28..dc1d719 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -1,5 +1,7 @@ use std::collections::BTreeMap; use std::net::IpAddr; +use std::net::Ipv4Addr; +use std::net::Ipv6Addr; use 
std::net::SocketAddr; use std::time::Duration; use std::time::Instant; @@ -15,6 +17,68 @@ use aquatic_udp_protocol::*; use crate::common::*; use crate::config::Config; +#[derive(Clone, PartialEq, Debug)] +pub struct ProtocolResponsePeer { + pub ip_address: I, + pub port: Port, +} + +impl ProtocolResponsePeer { + #[inline(always)] + fn from_peer(peer: &Peer) -> Self { + Self { + ip_address: peer.ip_address, + port: peer.port, + } + } +} + +pub struct ProtocolAnnounceResponse { + pub transaction_id: TransactionId, + pub announce_interval: AnnounceInterval, + pub leechers: NumberOfPeers, + pub seeders: NumberOfPeers, + pub peers: Vec>, +} + +impl Into for ProtocolAnnounceResponse { + fn into(self) -> ConnectedResponse { + ConnectedResponse::AnnounceIpv4(AnnounceResponseIpv4 { + transaction_id: self.transaction_id, + announce_interval: self.announce_interval, + leechers: self.leechers, + seeders: self.seeders, + peers: self + .peers + .into_iter() + .map(|peer| ResponsePeerIpv4 { + ip_address: peer.ip_address, + port: peer.port, + }) + .collect(), + }) + } +} + +impl Into for ProtocolAnnounceResponse { + fn into(self) -> ConnectedResponse { + ConnectedResponse::AnnounceIpv6(AnnounceResponseIpv6 { + transaction_id: self.transaction_id, + announce_interval: self.announce_interval, + leechers: self.leechers, + seeders: self.seeders, + peers: self + .peers + .into_iter() + .map(|peer| ResponsePeerIpv6 { + ip_address: peer.ip_address, + port: peer.port, + }) + .collect(), + }) + } +} + pub fn run_request_worker( config: Config, state: State, @@ -147,7 +211,7 @@ fn handle_announce_request_inner( &torrent_data.peers, max_num_peers_to_take, request.peer_id, - Peer::to_response_peer, + ProtocolResponsePeer::from_peer, ); ProtocolAnnounceResponse { @@ -262,14 +326,14 @@ mod tests { for i in 0..gen_num_peers { let key = gen_peer_id(i); - let value = gen_peer((i << 16) + i); + let peer = gen_peer((i << 16) + i); if i == 0 { opt_sender_key = Some(key); - opt_sender_peer = 
Some(value.to_response_peer()); + opt_sender_peer = Some(ProtocolResponsePeer::from_peer(&peer)); } - peer_map.insert(key, value); + peer_map.insert(key, peer); } let mut rng = thread_rng(); @@ -279,7 +343,7 @@ mod tests { &peer_map, req_num_peers, opt_sender_key.unwrap_or_else(|| gen_peer_id(1)), - Peer::to_response_peer, + ProtocolResponsePeer::from_peer, ); // Check that number of returned peers is correct diff --git a/aquatic_udp/src/lib/network.rs b/aquatic_udp/src/lib/network.rs index 5e4cac1..281ef3f 100644 --- a/aquatic_udp/src/lib/network.rs +++ b/aquatic_udp/src/lib/network.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeMap; use std::io::{Cursor, ErrorKind}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; use std::sync::{ @@ -7,19 +8,247 @@ use std::sync::{ use std::time::{Duration, Instant}; use std::vec::Drain; -use aquatic_common::access_list::create_access_list_cache; -use aquatic_common::ValidUntil; use crossbeam_channel::Receiver; use mio::net::UdpSocket; use mio::{Events, Interest, Poll, Token}; -use rand::prelude::{SeedableRng, StdRng}; +use rand::prelude::{SeedableRng, StdRng, Rng}; -use aquatic_udp_protocol::{Request, Response}; +use aquatic_common::access_list::create_access_list_cache; +use aquatic_common::ValidUntil; +use aquatic_common::access_list::AccessListCache; +use aquatic_common::AHashIndexMap; +use aquatic_udp_protocol::*; +use socket2::{Domain, Protocol, Socket, Type}; -use crate::common::network::*; use crate::common::*; use crate::config::Config; +#[derive(Default)] +pub struct ConnectionMap(AHashIndexMap<(ConnectionId, SocketAddr), ValidUntil>); + +impl ConnectionMap { + pub fn insert( + &mut self, + connection_id: ConnectionId, + socket_addr: SocketAddr, + valid_until: ValidUntil, + ) { + self.0.insert((connection_id, socket_addr), valid_until); + } + + pub fn contains(&self, connection_id: ConnectionId, socket_addr: SocketAddr) -> bool { + self.0.contains_key(&(connection_id, socket_addr)) + } + + pub fn 
clean(&mut self) { + let now = Instant::now(); + + self.0.retain(|_, v| v.0 > now); + self.0.shrink_to_fit(); + } +} + +pub struct PendingScrapeResponseMeta { + num_pending: usize, + valid_until: ValidUntil, +} + +#[derive(Default)] +pub struct PendingScrapeResponseMap( + AHashIndexMap, +); + +impl PendingScrapeResponseMap { + pub fn prepare( + &mut self, + transaction_id: TransactionId, + num_pending: usize, + valid_until: ValidUntil, + ) { + let meta = PendingScrapeResponseMeta { + num_pending, + valid_until, + }; + let response = PendingScrapeResponse { + transaction_id, + torrent_stats: BTreeMap::new(), + }; + + self.0.insert(transaction_id, (meta, response)); + } + + pub fn add_and_get_finished(&mut self, response: PendingScrapeResponse) -> Option { + let finished = if let Some(r) = self.0.get_mut(&response.transaction_id) { + r.0.num_pending -= 1; + + r.1.torrent_stats.extend(response.torrent_stats.into_iter()); + + r.0.num_pending == 0 + } else { + ::log::warn!("PendingScrapeResponses.add didn't find PendingScrapeResponse in map"); + + false + }; + + if finished { + let response = self.0.remove(&response.transaction_id).unwrap().1; + + Some(Response::Scrape(ScrapeResponse { + transaction_id: response.transaction_id, + torrent_stats: response.torrent_stats.into_values().collect(), + })) + } else { + None + } + } + + pub fn clean(&mut self) { + let now = Instant::now(); + + self.0.retain(|_, v| v.0.valid_until.0 > now); + self.0.shrink_to_fit(); + } +} + +pub fn handle_request( + config: &Config, + connections: &mut ConnectionMap, + pending_scrape_responses: &mut PendingScrapeResponseMap, + access_list_cache: &mut AccessListCache, + rng: &mut StdRng, + request_sender: &ConnectedRequestSender, + local_responses: &mut Vec<(Response, SocketAddr)>, + valid_until: ValidUntil, + res_request: Result, + src: SocketAddr, +) { + let access_list_mode = config.access_list.mode; + + match res_request { + Ok(Request::Connect(request)) => { + let connection_id = 
ConnectionId(rng.gen()); + + connections.insert(connection_id, src, valid_until); + + let response = Response::Connect(ConnectResponse { + connection_id, + transaction_id: request.transaction_id, + }); + + local_responses.push((response, src)) + } + Ok(Request::Announce(request)) => { + if connections.contains(request.connection_id, src) { + if access_list_cache + .load() + .allows(access_list_mode, &request.info_hash.0) + { + let worker_index = + RequestWorkerIndex::from_info_hash(config, request.info_hash); + + request_sender.try_send_to( + worker_index, + ConnectedRequest::Announce(request), + src, + ); + } else { + let response = Response::Error(ErrorResponse { + transaction_id: request.transaction_id, + message: "Info hash not allowed".into(), + }); + + local_responses.push((response, src)) + } + } + } + Ok(Request::Scrape(request)) => { + if connections.contains(request.connection_id, src) { + let mut requests: AHashIndexMap = + Default::default(); + + let transaction_id = request.transaction_id; + + for (i, info_hash) in request.info_hashes.into_iter().enumerate() { + let pending = requests + .entry(RequestWorkerIndex::from_info_hash(&config, info_hash)) + .or_insert_with(|| PendingScrapeRequest { + transaction_id, + info_hashes: BTreeMap::new(), + }); + + pending.info_hashes.insert(i, info_hash); + } + + pending_scrape_responses.prepare(transaction_id, requests.len(), valid_until); + + for (request_worker_index, request) in requests { + request_sender.try_send_to( + request_worker_index, + ConnectedRequest::Scrape(request), + src, + ); + } + } + } + Err(err) => { + ::log::debug!("Request::from_bytes error: {:?}", err); + + if let RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } = err + { + if connections.contains(connection_id, src) { + let response = ErrorResponse { + transaction_id, + message: err.right_or("Parse error").into(), + }; + + local_responses.push((response.into(), src)); + } + } + } + } +} + +pub fn 
create_socket(config: &Config) -> ::std::net::UdpSocket { + let socket = if config.network.address.is_ipv4() { + Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) + } else { + Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) + } + .expect("create socket"); + + if config.network.only_ipv6 { + socket.set_only_v6(true).expect("socket: set only ipv6"); + } + + socket.set_reuse_port(true).expect("socket: set reuse port"); + + socket + .set_nonblocking(true) + .expect("socket: set nonblocking"); + + socket + .bind(&config.network.address.into()) + .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); + + let recv_buffer_size = config.network.socket_recv_buffer_size; + + if recv_buffer_size != 0 { + if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { + ::log::error!( + "socket: failed setting recv buffer to {}: {:?}", + recv_buffer_size, + err + ); + } + } + + socket.into() +} + pub fn run_socket_worker( state: State, config: Config, From fb5d0b130276542f1f2187863935f01bba33878e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Thu, 18 Nov 2021 22:34:33 +0100 Subject: [PATCH 34/56] README: update to reflect that aquatic_udp glommio impl was removed --- README.md | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index b8019fc..528dad1 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ of sub-implementations for different protocols: | Name | Protocol | OS requirements | |--------------|--------------------------------------------|------------------------------------------------------------| -| aquatic_udp | [BitTorrent over UDP] | Unix-like with [mio] (default) / Linux 5.8+ with [glommio] | +| aquatic_udp | [BitTorrent over UDP] | Unix-like | | aquatic_http | [BitTorrent over HTTP] with TLS ([rustls]) | Linux 5.8+ | | aquatic_ws | [WebTorrent] | Unix-like with [mio] (default) / Linux 5.8+ with [glommio] | @@ -48,7 +48,6 @@ 
Compile the implementations that you are interested in: . ./scripts/env-native-cpu-without-avx-512 cargo build --release -p aquatic_udp -cargo build --release -p aquatic_udp --features "with-glommio" --no-default-features cargo build --release -p aquatic_http cargo build --release -p aquatic_ws cargo build --release -p aquatic_ws --features "with-glommio" --no-default-features @@ -122,20 +121,15 @@ except that it: Supports IPv4 and IPv6 (BitTorrent UDP protocol doesn't support IPv6 very well, however.) -#### Alternative implementation using io_uring - -[io_uring]: https://en.wikipedia.org/wiki/Io_uring -[glommio]: https://github.com/DataDog/glommio - -There is an alternative implementation that utilizes [io_uring] by running on -[glommio]. It only runs on Linux and requires a recent kernel (version 5.8 or later). - #### Performance ![UDP BitTorrent tracker throughput comparison](./documents/aquatic-udp-load-test-illustration-2021-11-08.png) More details are available [here](./documents/aquatic-udp-load-test-2021-11-08.pdf). +Since making this benchmark, I have improved the mio-based implementation +considerably and removed the glommio-based implementation. 
+ ### aquatic_http: HTTP BitTorrent tracker [HTTP BitTorrent protocol]: https://wiki.theory.org/index.php/BitTorrentSpecification#Tracker_HTTP.2FHTTPS_Protocol From 210550c71921ba41d58c3b1ec5129a3cd42ed20d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Thu, 18 Nov 2021 22:37:32 +0100 Subject: [PATCH 35/56] udp: restructure config.rs for better readability --- aquatic_udp/src/lib/config.rs | 108 +++++++++++++++++----------------- 1 file changed, 54 insertions(+), 54 deletions(-) diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index f26ba4e..3db300d 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -27,6 +27,25 @@ pub struct Config { pub cpu_pinning: aquatic_common::cpu_pinning::CpuPinningConfig, } +impl Default for Config { + fn default() -> Self { + Self { + socket_workers: 1, + request_workers: 1, + log_level: LogLevel::Error, + network: NetworkConfig::default(), + protocol: ProtocolConfig::default(), + handlers: HandlerConfig::default(), + statistics: StatisticsConfig::default(), + cleaning: CleaningConfig::default(), + privileges: PrivilegeConfig::default(), + access_list: AccessListConfig::default(), + #[cfg(feature = "cpu-pinning")] + cpu_pinning: Default::default(), + } + } +} + impl aquatic_cli_helpers::Config for Config { fn get_log_level(&self) -> Option { Some(self.log_level) @@ -57,6 +76,17 @@ pub struct NetworkConfig { pub poll_event_capacity: usize, } +impl Default for NetworkConfig { + fn default() -> Self { + Self { + address: SocketAddr::from(([0, 0, 0, 0], 3000)), + only_ipv6: false, + socket_recv_buffer_size: 4096 * 128, + poll_event_capacity: 4096, + } + } +} + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct ProtocolConfig { @@ -68,12 +98,30 @@ pub struct ProtocolConfig { pub peer_announce_interval: i32, } +impl Default for ProtocolConfig { + fn default() -> Self { + Self { + max_scrape_torrents: 255, + max_response_peers: 255, + 
peer_announce_interval: 60 * 15, + } + } +} + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct HandlerConfig { pub channel_recv_timeout_ms: u64, } +impl Default for HandlerConfig { + fn default() -> Self { + Self { + channel_recv_timeout_ms: 100, + } + } +} + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct StatisticsConfig { @@ -81,6 +129,12 @@ pub struct StatisticsConfig { pub interval: u64, } +impl Default for StatisticsConfig { + fn default() -> Self { + Self { interval: 0 } + } +} + #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(default)] pub struct CleaningConfig { @@ -94,60 +148,6 @@ pub struct CleaningConfig { pub max_peer_age: u64, } -impl Default for Config { - fn default() -> Self { - Self { - socket_workers: 1, - request_workers: 1, - log_level: LogLevel::Error, - network: NetworkConfig::default(), - protocol: ProtocolConfig::default(), - handlers: HandlerConfig::default(), - statistics: StatisticsConfig::default(), - cleaning: CleaningConfig::default(), - privileges: PrivilegeConfig::default(), - access_list: AccessListConfig::default(), - #[cfg(feature = "cpu-pinning")] - cpu_pinning: Default::default(), - } - } -} - -impl Default for NetworkConfig { - fn default() -> Self { - Self { - address: SocketAddr::from(([0, 0, 0, 0], 3000)), - only_ipv6: false, - socket_recv_buffer_size: 4096 * 128, - poll_event_capacity: 4096, - } - } -} - -impl Default for ProtocolConfig { - fn default() -> Self { - Self { - max_scrape_torrents: 255, - max_response_peers: 255, - peer_announce_interval: 60 * 15, - } - } -} - -impl Default for HandlerConfig { - fn default() -> Self { - Self { - channel_recv_timeout_ms: 100, - } - } -} - -impl Default for StatisticsConfig { - fn default() -> Self { - Self { interval: 0 } - } -} - impl Default for CleaningConfig { fn default() -> Self { Self { From 07b959c9d46b73711f70fa5d1d2b41aed759e94d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: 
Thu, 18 Nov 2021 22:37:46 +0100 Subject: [PATCH 36/56] udp: run cargo fmt --- aquatic_udp/src/lib/handlers.rs | 2 +- aquatic_udp/src/lib/network.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index dc1d719..5653290 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -23,7 +23,7 @@ pub struct ProtocolResponsePeer { pub port: Port, } -impl ProtocolResponsePeer { +impl ProtocolResponsePeer { #[inline(always)] fn from_peer(peer: &Peer) -> Self { Self { diff --git a/aquatic_udp/src/lib/network.rs b/aquatic_udp/src/lib/network.rs index 281ef3f..2fb6769 100644 --- a/aquatic_udp/src/lib/network.rs +++ b/aquatic_udp/src/lib/network.rs @@ -11,12 +11,12 @@ use std::vec::Drain; use crossbeam_channel::Receiver; use mio::net::UdpSocket; use mio::{Events, Interest, Poll, Token}; -use rand::prelude::{SeedableRng, StdRng, Rng}; +use rand::prelude::{Rng, SeedableRng, StdRng}; use aquatic_common::access_list::create_access_list_cache; -use aquatic_common::ValidUntil; use aquatic_common::access_list::AccessListCache; use aquatic_common::AHashIndexMap; +use aquatic_common::ValidUntil; use aquatic_udp_protocol::*; use socket2::{Domain, Protocol, Socket, Type}; From 96c5775eccd3c371071e63470164b645a9c4e65b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Thu, 18 Nov 2021 22:51:19 +0100 Subject: [PATCH 37/56] Update README --- README.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 528dad1..36bb593 100644 --- a/README.md +++ b/README.md @@ -25,8 +25,8 @@ of sub-implementations for different protocols: - Install Rust with [rustup](https://rustup.rs/) (stable is recommended) - Install cmake with your package manager (e.g., `apt-get install cmake`) -- Unless you're planning to only run aquatic_udp and only the cross-platform, - mio based implementation, make sure locked memory 
limits are sufficient. +- Unless you're planning to only run the cross-platform mio based + implementations, make sure locked memory limits are sufficient. You can do this by adding the following lines to `/etc/security/limits.conf`, and then logging out and back in: @@ -118,8 +118,7 @@ except that it: source IP is always used. * Doesn't track of the number of torrent downloads (0 is always sent). -Supports IPv4 and IPv6 (BitTorrent UDP protocol doesn't support IPv6 very well, -however.) +Supports IPv4 and IPv6. #### Performance @@ -130,6 +129,12 @@ More details are available [here](./documents/aquatic-udp-load-test-2021-11-08.p Since making this benchmark, I have improved the mio-based implementation considerably and removed the glommio-based implementation. +#### Optimisation attempts that didn't work out + +* Using glommio +* Using io-uring +* Using zerocopy + vectored sends for responses + ### aquatic_http: HTTP BitTorrent tracker [HTTP BitTorrent protocol]: https://wiki.theory.org/index.php/BitTorrentSpecification#Tracker_HTTP.2FHTTPS_Protocol From ce3b758f8e4248a3de42481ab89a31bde1bcd181 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Thu, 18 Nov 2021 22:52:12 +0100 Subject: [PATCH 38/56] GitHub CI: stop trying to build aquatic_udp with glommio --- .github/workflows/cargo-build-and-test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/cargo-build-and-test.yml b/.github/workflows/cargo-build-and-test.yml index a2c7735..350b1cc 100644 --- a/.github/workflows/cargo-build-and-test.yml +++ b/.github/workflows/cargo-build-and-test.yml @@ -18,7 +18,6 @@ jobs: - name: Build run: | cargo build --verbose -p aquatic_udp --features "cpu-pinning" - cargo build --verbose -p aquatic_udp --features "with-glommio cpu-pinning" --no-default-features cargo build --verbose -p aquatic_http --features "cpu-pinning" From 3d88689c772e94a73f10a0bd8c4a9ada683a103b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: 
Thu, 18 Nov 2021 23:03:54 +0100 Subject: [PATCH 39/56] Update TODO --- TODO.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/TODO.md b/TODO.md index 488b937..aa8d1bf 100644 --- a/TODO.md +++ b/TODO.md @@ -21,12 +21,11 @@ * load testing shows that with sharded state, mio reaches 1.4M responses per second with 6 socket and 4 request workers. performance is great overall and faster than without sharding. io_uring impl is a lot behind mio impl with new load tester + * look at proper cpu pinning (check that one thread gets bound per core) + * then consider so_attach_reuseport_cbpf * what poll event capacity is actually needed? * mio * stagger connection cleaning intervals? - * uring - * ValidUntil periodic update - * statistics * aquatic_http: * clean out connections regularly From c1f2d036c0cd65c8afe73da75aece612870ed9b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 00:14:46 +0100 Subject: [PATCH 40/56] add scripts/watch-threads.sh for showing runtime stats Includes per-thread cpu affinity (PSR) --- scripts/watch-threads.sh | 3 +++ 1 file changed, 3 insertions(+) create mode 100755 scripts/watch-threads.sh diff --git a/scripts/watch-threads.sh b/scripts/watch-threads.sh new file mode 100755 index 0000000..864fbad --- /dev/null +++ b/scripts/watch-threads.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +watch -d -n 0.5 ps H -o euser,pid,tid,comm,%mem,rss,%cpu,psr -p `pgrep aquatic` From 593a46452f6f741ecc7470e7738ccfd1b487f387 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 00:17:14 +0100 Subject: [PATCH 41/56] aquatic_common: use hwloc for cpu pinning, for automatic core selection This might not work very well on virtualized hosts, however.. 
--- Cargo.lock | 69 +++++++++++++++++------- aquatic_common/Cargo.toml | 5 +- aquatic_common/src/cpu_pinning.rs | 89 +++++++++++++++---------------- 3 files changed, 97 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2e45b4..d95700c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,18 +17,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "affinity" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763e484feceb7dd021b21c5c6f81aee06b1594a743455ec7efbf72e6355e447b" -dependencies = [ - "cfg-if", - "errno", - "libc", - "num_cpus", -] - [[package]] name = "ahash" version = "0.3.8" @@ -86,12 +74,12 @@ dependencies = [ name = "aquatic_common" version = "0.1.0" dependencies = [ - "affinity", "ahash 0.7.6", "anyhow", "arc-swap", "hashbrown 0.11.2", "hex", + "hwloc", "indexmap-amortized", "libc", "log", @@ -399,6 +387,12 @@ dependencies = [ "serde_bytes", ] +[[package]] +name = "bitflags" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d" + [[package]] name = "bitflags" version = "1.3.2" @@ -502,7 +496,7 @@ version = "2.33.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" dependencies = [ - "bitflags", + "bitflags 1.3.2", "textwrap", "unicode-width", ] @@ -962,7 +956,7 @@ version = "0.6.0" source = "git+https://github.com/DataDog/glommio.git?rev=4e6b14772da2f4325271fbcf12d24cf91ed466e5#4e6b14772da2f4325271fbcf12d24cf91ed466e5" dependencies = [ "ahash 0.7.6", - "bitflags", + "bitflags 1.3.2", "bitmaps", "buddy-alloc", "cc", @@ -1076,6 +1070,21 @@ version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +[[package]] +name = "hwloc" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2934f84993b8b4bcae9b6a4e5f0aca638462dda9c7b4f26a570241494f21e0f4" +dependencies = [ + "bitflags 0.7.0", + "errno", + "kernel32-sys", + "libc", + "num", + "pkg-config", + "winapi 0.2.8", +] + [[package]] name = "idna" version = "0.2.3" @@ -1323,7 +1332,7 @@ version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f305c2c2e4c39a82f7bf0bf65fb557f9070ce06781d4f2454295cc34b1c43188" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cc", "cfg-if", "libc", @@ -1345,6 +1354,17 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "num" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4703ad64153382334aa8db57c637364c322d3372e097840c72000dabdcf6156e" +dependencies = [ + "num-integer", + "num-iter", + "num-traits", +] + [[package]] name = "num-format" version = "0.4.0" @@ -1365,6 +1385,17 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.14" @@ -1424,7 +1455,7 @@ version = "0.10.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "foreign-types", "libc", @@ -1689,7 +1720,7 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] @@ -1837,7 +1868,7 @@ version = "2.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" dependencies = [ - "bitflags", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", diff --git a/aquatic_common/Cargo.toml b/aquatic_common/Cargo.toml index b076f77..951b65f 100644 --- a/aquatic_common/Cargo.toml +++ b/aquatic_common/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/greatest-ape/aquatic" name = "aquatic_common" [features] -cpu-pinning = ["affinity", "libc"] +cpu-pinning = ["hwloc", "libc"] [dependencies] ahash = "0.7" @@ -25,5 +25,6 @@ privdrop = "0.5" rand = { version = "0.8", features = ["small_rng"] } serde = { version = "1", features = ["derive"] } -affinity = { version = "0.1", optional = true } +# cpu-pinning +hwloc = { version = "0.5", optional = true } libc = { version = "0.2", optional = true } \ No newline at end of file diff --git a/aquatic_common/src/cpu_pinning.rs b/aquatic_common/src/cpu_pinning.rs index 43f506b..a4065df 100644 --- a/aquatic_common/src/cpu_pinning.rs +++ b/aquatic_common/src/cpu_pinning.rs @@ -1,3 +1,4 @@ +use hwloc::{CpuSet, ObjectType, Topology, CPUBIND_THREAD}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize)] @@ -17,8 +18,7 @@ impl Default for CpuPinningMode { pub struct CpuPinningConfig { pub active: bool, pub mode: CpuPinningMode, - pub virtual_per_physical_cpu: usize, - pub offset_cpus: usize, + pub core_offset: usize, } impl Default for CpuPinningConfig { @@ -26,8 +26,7 @@ impl Default for CpuPinningConfig { Self { active: false, mode: Default::default(), - virtual_per_physical_cpu: 2, - offset_cpus: 0, + core_offset: 0, } } } @@ -49,59 +48,59 @@ pub enum WorkerIndex { } impl WorkerIndex { - fn get_cpu_indices(self, config: &CpuPinningConfig, socket_workers: usize) -> Vec { - let offset = match self { - Self::Other => config.virtual_per_physical_cpu * config.offset_cpus, - Self::SocketWorker(index) => { - 
config.virtual_per_physical_cpu * (config.offset_cpus + 1 + index) - } - Self::RequestWorker(index) => { - config.virtual_per_physical_cpu * (config.offset_cpus + 1 + socket_workers + index) - } + fn get_core_index( + self, + config: &CpuPinningConfig, + socket_workers: usize, + core_count: usize, + ) -> usize { + let ascending_index = match self { + Self::Other => config.core_offset, + Self::SocketWorker(index) => config.core_offset + 1 + index, + Self::RequestWorker(index) => config.core_offset + 1 + socket_workers + index, }; - let virtual_cpus = (0..config.virtual_per_physical_cpu).map(|i| offset + i); - - let virtual_cpus: Vec = match config.mode { - CpuPinningMode::Ascending => virtual_cpus.collect(), - CpuPinningMode::Descending => { - let max_index = affinity::get_core_num() - 1; - - virtual_cpus - .map(|i| max_index.checked_sub(i).unwrap_or(0)) - .collect() - } - }; - - ::log::info!( - "Calculated virtual CPU pin indices {:?} for {:?}", - virtual_cpus, - self - ); - - virtual_cpus + match config.mode { + CpuPinningMode::Ascending => ascending_index, + CpuPinningMode::Descending => core_count - 1 - ascending_index, + } } } -/// Note: don't call this when affinities were already set in the current or in -/// a parent thread. Doing so limits the number of cores that are seen and -/// messes up setting affinities. 
+/// Pin current thread to a suitable core +/// +/// Requires hwloc (`apt-get install libhwloc-dev`) pub fn pin_current_if_configured_to( config: &CpuPinningConfig, socket_workers: usize, worker_index: WorkerIndex, ) { if config.active { - let indices = worker_index.get_cpu_indices(config, socket_workers); + let mut topology = Topology::new(); - if let Err(err) = affinity::set_thread_affinity(indices.clone()) { - ::log::error!( - "Failed setting thread affinities {:?} for {:?}: {:#?}", - indices, - worker_index, - err - ); - } + let core_cpu_sets: Vec = topology + .objects_with_type(&ObjectType::Core) + .expect("hwloc: list cores") + .into_iter() + .map(|core| core.allowed_cpuset().expect("hwloc: get core cpu set")) + .collect(); + + let core_index = worker_index.get_core_index(config, socket_workers, core_cpu_sets.len()); + + let cpu_set = core_cpu_sets + .get(core_index) + .expect(&format!("get cpu set for core {}", core_index)) + .to_owned(); + + topology + .set_cpubind(cpu_set, CPUBIND_THREAD) + .expect(&format!("bind thread to core {}", core_index)); + + ::log::info!( + "Pinned worker {:?} to cpu core {}", + worker_index, + core_index + ); } } From 44ad2167fb99487c8fb5e37db79d243d1b56c8db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 00:21:51 +0100 Subject: [PATCH 42/56] CI: install libhwloc-dev --- .github/actions/test-transfer/entrypoint.sh | 2 +- .github/workflows/cargo-build-and-test.yml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/actions/test-transfer/entrypoint.sh b/.github/actions/test-transfer/entrypoint.sh index 715d0e5..f9095ea 100755 --- a/.github/actions/test-transfer/entrypoint.sh +++ b/.github/actions/test-transfer/entrypoint.sh @@ -19,7 +19,7 @@ fi ulimit -a $SUDO apt-get update -$SUDO apt-get install -y cmake libssl-dev screen rtorrent mktorrent ssl-cert ca-certificates curl golang +$SUDO apt-get install -y cmake libssl-dev screen rtorrent mktorrent ssl-cert 
ca-certificates curl golang libhwloc-dev git clone https://github.com/anacrolix/torrent.git gotorrent cd gotorrent diff --git a/.github/workflows/cargo-build-and-test.yml b/.github/workflows/cargo-build-and-test.yml index 350b1cc..4dc2e46 100644 --- a/.github/workflows/cargo-build-and-test.yml +++ b/.github/workflows/cargo-build-and-test.yml @@ -15,6 +15,8 @@ jobs: timeout-minutes: 10 steps: - uses: actions/checkout@v2 + - name: Install dependencies + run: sudo apt-get update -y && sudo apt-get install libhwloc-dev -y - name: Build run: | cargo build --verbose -p aquatic_udp --features "cpu-pinning" From 2e7c8ac90434ff17abf9325fa488e89d37bd43e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 00:59:52 +0100 Subject: [PATCH 43/56] udp: reorder code in network.rs for better readability --- aquatic_udp/src/lib/network.rs | 278 ++++++++++++++++----------------- 1 file changed, 139 insertions(+), 139 deletions(-) diff --git a/aquatic_udp/src/lib/network.rs b/aquatic_udp/src/lib/network.rs index 2fb6769..704fb5a 100644 --- a/aquatic_udp/src/lib/network.rs +++ b/aquatic_udp/src/lib/network.rs @@ -110,145 +110,6 @@ impl PendingScrapeResponseMap { } } -pub fn handle_request( - config: &Config, - connections: &mut ConnectionMap, - pending_scrape_responses: &mut PendingScrapeResponseMap, - access_list_cache: &mut AccessListCache, - rng: &mut StdRng, - request_sender: &ConnectedRequestSender, - local_responses: &mut Vec<(Response, SocketAddr)>, - valid_until: ValidUntil, - res_request: Result, - src: SocketAddr, -) { - let access_list_mode = config.access_list.mode; - - match res_request { - Ok(Request::Connect(request)) => { - let connection_id = ConnectionId(rng.gen()); - - connections.insert(connection_id, src, valid_until); - - let response = Response::Connect(ConnectResponse { - connection_id, - transaction_id: request.transaction_id, - }); - - local_responses.push((response, src)) - } - Ok(Request::Announce(request)) => { - if 
connections.contains(request.connection_id, src) { - if access_list_cache - .load() - .allows(access_list_mode, &request.info_hash.0) - { - let worker_index = - RequestWorkerIndex::from_info_hash(config, request.info_hash); - - request_sender.try_send_to( - worker_index, - ConnectedRequest::Announce(request), - src, - ); - } else { - let response = Response::Error(ErrorResponse { - transaction_id: request.transaction_id, - message: "Info hash not allowed".into(), - }); - - local_responses.push((response, src)) - } - } - } - Ok(Request::Scrape(request)) => { - if connections.contains(request.connection_id, src) { - let mut requests: AHashIndexMap = - Default::default(); - - let transaction_id = request.transaction_id; - - for (i, info_hash) in request.info_hashes.into_iter().enumerate() { - let pending = requests - .entry(RequestWorkerIndex::from_info_hash(&config, info_hash)) - .or_insert_with(|| PendingScrapeRequest { - transaction_id, - info_hashes: BTreeMap::new(), - }); - - pending.info_hashes.insert(i, info_hash); - } - - pending_scrape_responses.prepare(transaction_id, requests.len(), valid_until); - - for (request_worker_index, request) in requests { - request_sender.try_send_to( - request_worker_index, - ConnectedRequest::Scrape(request), - src, - ); - } - } - } - Err(err) => { - ::log::debug!("Request::from_bytes error: {:?}", err); - - if let RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } = err - { - if connections.contains(connection_id, src) { - let response = ErrorResponse { - transaction_id, - message: err.right_or("Parse error").into(), - }; - - local_responses.push((response.into(), src)); - } - } - } - } -} - -pub fn create_socket(config: &Config) -> ::std::net::UdpSocket { - let socket = if config.network.address.is_ipv4() { - Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) - } else { - Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) - } - .expect("create socket"); - - if 
config.network.only_ipv6 { - socket.set_only_v6(true).expect("socket: set only ipv6"); - } - - socket.set_reuse_port(true).expect("socket: set reuse port"); - - socket - .set_nonblocking(true) - .expect("socket: set nonblocking"); - - socket - .bind(&config.network.address.into()) - .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err)); - - let recv_buffer_size = config.network.socket_recv_buffer_size; - - if recv_buffer_size != 0 { - if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { - ::log::error!( - "socket: failed setting recv buffer to {}: {:?}", - recv_buffer_size, - err - ); - } - } - - socket.into() -} - pub fn run_socket_worker( state: State, config: Config, @@ -412,6 +273,108 @@ fn read_requests( } } +pub fn handle_request( + config: &Config, + connections: &mut ConnectionMap, + pending_scrape_responses: &mut PendingScrapeResponseMap, + access_list_cache: &mut AccessListCache, + rng: &mut StdRng, + request_sender: &ConnectedRequestSender, + local_responses: &mut Vec<(Response, SocketAddr)>, + valid_until: ValidUntil, + res_request: Result, + src: SocketAddr, +) { + let access_list_mode = config.access_list.mode; + + match res_request { + Ok(Request::Connect(request)) => { + let connection_id = ConnectionId(rng.gen()); + + connections.insert(connection_id, src, valid_until); + + let response = Response::Connect(ConnectResponse { + connection_id, + transaction_id: request.transaction_id, + }); + + local_responses.push((response, src)) + } + Ok(Request::Announce(request)) => { + if connections.contains(request.connection_id, src) { + if access_list_cache + .load() + .allows(access_list_mode, &request.info_hash.0) + { + let worker_index = + RequestWorkerIndex::from_info_hash(config, request.info_hash); + + request_sender.try_send_to( + worker_index, + ConnectedRequest::Announce(request), + src, + ); + } else { + let response = Response::Error(ErrorResponse { + transaction_id: request.transaction_id, + message: 
"Info hash not allowed".into(), + }); + + local_responses.push((response, src)) + } + } + } + Ok(Request::Scrape(request)) => { + if connections.contains(request.connection_id, src) { + let mut requests: AHashIndexMap = + Default::default(); + + let transaction_id = request.transaction_id; + + for (i, info_hash) in request.info_hashes.into_iter().enumerate() { + let pending = requests + .entry(RequestWorkerIndex::from_info_hash(&config, info_hash)) + .or_insert_with(|| PendingScrapeRequest { + transaction_id, + info_hashes: BTreeMap::new(), + }); + + pending.info_hashes.insert(i, info_hash); + } + + pending_scrape_responses.prepare(transaction_id, requests.len(), valid_until); + + for (request_worker_index, request) in requests { + request_sender.try_send_to( + request_worker_index, + ConnectedRequest::Scrape(request), + src, + ); + } + } + } + Err(err) => { + ::log::debug!("Request::from_bytes error: {:?}", err); + + if let RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } = err + { + if connections.contains(connection_id, src) { + let response = ErrorResponse { + transaction_id, + message: err.right_or("Parse error").into(), + }; + + local_responses.push((response.into(), src)); + } + } + } + } +} + #[inline] fn send_responses( state: &State, @@ -516,3 +479,40 @@ fn send_response( } } } + +pub fn create_socket(config: &Config) -> ::std::net::UdpSocket { + let socket = if config.network.address.is_ipv4() { + Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP)) + } else { + Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP)) + } + .expect("create socket"); + + if config.network.only_ipv6 { + socket.set_only_v6(true).expect("socket: set only ipv6"); + } + + socket.set_reuse_port(true).expect("socket: set reuse port"); + + socket + .set_nonblocking(true) + .expect("socket: set nonblocking"); + + socket + .bind(&config.network.address.into()) + .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, 
err)); + + let recv_buffer_size = config.network.socket_recv_buffer_size; + + if recv_buffer_size != 0 { + if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) { + ::log::error!( + "socket: failed setting recv buffer to {}: {:?}", + recv_buffer_size, + err + ); + } + } + + socket.into() +} From 9c919a6ecbefdb8243854ac38aedf6d99e856061 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 01:10:37 +0100 Subject: [PATCH 44/56] udp: clean pending scrape map --- aquatic_udp/src/lib/config.rs | 11 +++++++++++ aquatic_udp/src/lib/network.rs | 35 +++++++++++++++++++++++++--------- 2 files changed, 37 insertions(+), 9 deletions(-) diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index 3db300d..978344d 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -142,10 +142,19 @@ pub struct CleaningConfig { pub connection_cleaning_interval: u64, /// Clean torrents this often (seconds) pub torrent_cleaning_interval: u64, + /// Clean pending scrape responses this often (seconds) + /// + /// In regular operation, there should be no pending scrape responses + /// lingering for a long time. However, the cleaning also returns unused + /// allocated memory to the OS, so the interval can be configured here. 
+ pub pending_scrape_cleaning_interval: u64, /// Remove connections that are older than this (seconds) pub max_connection_age: u64, /// Remove peers that haven't announced for this long (seconds) pub max_peer_age: u64, + /// Remove pending scrape responses that haven't been returned from request + /// workers for this long (seconds) + pub max_pending_scrape_age: u64, } impl Default for CleaningConfig { @@ -153,8 +162,10 @@ impl Default for CleaningConfig { Self { connection_cleaning_interval: 60, torrent_cleaning_interval: 60 * 2, + pending_scrape_cleaning_interval: 60 * 10, max_connection_age: 60 * 5, max_peer_age: 60 * 20, + max_pending_scrape_age: 60, } } } diff --git a/aquatic_udp/src/lib/network.rs b/aquatic_udp/src/lib/network.rs index 704fb5a..4fbb97a 100644 --- a/aquatic_udp/src/lib/network.rs +++ b/aquatic_udp/src/lib/network.rs @@ -140,10 +140,15 @@ pub fn run_socket_worker( let timeout = Duration::from_millis(50); - let cleaning_duration = Duration::from_secs(config.cleaning.connection_cleaning_interval); + let connection_cleaning_duration = + Duration::from_secs(config.cleaning.connection_cleaning_interval); + let pending_scrape_cleaning_duration = + Duration::from_secs(config.cleaning.pending_scrape_cleaning_interval); + + let mut last_connection_cleaning = Instant::now(); + let mut last_pending_scrape_cleaning = Instant::now(); let mut iter_counter = 0usize; - let mut last_cleaning = Instant::now(); loop { poll.poll(&mut events, Some(timeout)) @@ -180,10 +185,15 @@ pub fn run_socket_worker( if iter_counter % 32 == 0 { let now = Instant::now(); - if now > last_cleaning + cleaning_duration { + if now > last_connection_cleaning + connection_cleaning_duration { connections.clean(); - last_cleaning = now; + last_connection_cleaning = now; + } + if now > last_pending_scrape_cleaning + pending_scrape_cleaning_duration { + pending_scrape_responses.clean(); + + last_pending_scrape_cleaning = now; } } @@ -206,7 +216,8 @@ fn read_requests( let mut 
requests_received: usize = 0; let mut bytes_received: usize = 0; - let valid_until = ValidUntil::new(config.cleaning.max_connection_age); + let connection_valid_until = ValidUntil::new(config.cleaning.max_connection_age); + let pending_scrape_valid_until = ValidUntil::new(config.cleaning.max_pending_scrape_age); let mut access_list_cache = create_access_list_cache(&state.access_list); @@ -246,7 +257,8 @@ fn read_requests( rng, request_sender, local_responses, - valid_until, + connection_valid_until, + pending_scrape_valid_until, res_request, src, ); @@ -281,7 +293,8 @@ pub fn handle_request( rng: &mut StdRng, request_sender: &ConnectedRequestSender, local_responses: &mut Vec<(Response, SocketAddr)>, - valid_until: ValidUntil, + connection_valid_until: ValidUntil, + pending_scrape_valid_until: ValidUntil, res_request: Result, src: SocketAddr, ) { @@ -291,7 +304,7 @@ pub fn handle_request( Ok(Request::Connect(request)) => { let connection_id = ConnectionId(rng.gen()); - connections.insert(connection_id, src, valid_until); + connections.insert(connection_id, src, connection_valid_until); let response = Response::Connect(ConnectResponse { connection_id, @@ -342,7 +355,11 @@ pub fn handle_request( pending.info_hashes.insert(i, info_hash); } - pending_scrape_responses.prepare(transaction_id, requests.len(), valid_until); + pending_scrape_responses.prepare( + transaction_id, + requests.len(), + pending_scrape_valid_until, + ); for (request_worker_index, request) in requests { request_sender.try_send_to( From a530dc28602f343f09ae98a7d83ec9193e4f8215 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 01:18:58 +0100 Subject: [PATCH 45/56] udp: network: update ValidUntil structs less often --- aquatic_common/src/lib.rs | 3 +++ aquatic_udp/src/lib/network.rs | 16 +++++++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/aquatic_common/src/lib.rs b/aquatic_common/src/lib.rs index bae5471..25488f0 100644 --- 
a/aquatic_common/src/lib.rs +++ b/aquatic_common/src/lib.rs @@ -23,6 +23,9 @@ impl ValidUntil { pub fn new(offset_seconds: u64) -> Self { Self(Instant::now() + Duration::from_secs(offset_seconds)) } + pub fn new_with_now(now: Instant, offset_seconds: u64) -> Self { + Self(now + Duration::from_secs(offset_seconds)) + } } /// Extract response peers diff --git a/aquatic_udp/src/lib/network.rs b/aquatic_udp/src/lib/network.rs index 4fbb97a..c38c311 100644 --- a/aquatic_udp/src/lib/network.rs +++ b/aquatic_udp/src/lib/network.rs @@ -145,6 +145,9 @@ pub fn run_socket_worker( let pending_scrape_cleaning_duration = Duration::from_secs(config.cleaning.pending_scrape_cleaning_interval); + let mut connection_valid_until = ValidUntil::new(config.cleaning.max_connection_age); + let mut pending_scrape_valid_until = ValidUntil::new(config.cleaning.max_pending_scrape_age); + let mut last_connection_cleaning = Instant::now(); let mut last_pending_scrape_cleaning = Instant::now(); @@ -168,6 +171,8 @@ pub fn run_socket_worker( &mut buffer, &request_sender, &mut local_responses, + connection_valid_until, + pending_scrape_valid_until, ); } } @@ -182,9 +187,15 @@ pub fn run_socket_worker( local_responses.drain(..), ); + // Run periodic ValidUntil updates and state cleaning if iter_counter % 32 == 0 { let now = Instant::now(); + connection_valid_until = + ValidUntil::new_with_now(now, config.cleaning.max_connection_age); + pending_scrape_valid_until = + ValidUntil::new_with_now(now, config.cleaning.max_pending_scrape_age); + if now > last_connection_cleaning + connection_cleaning_duration { connections.clean(); @@ -212,13 +223,12 @@ fn read_requests( buffer: &mut [u8], request_sender: &ConnectedRequestSender, local_responses: &mut Vec<(Response, SocketAddr)>, + connection_valid_until: ValidUntil, + pending_scrape_valid_until: ValidUntil, ) { let mut requests_received: usize = 0; let mut bytes_received: usize = 0; - let connection_valid_until = 
ValidUntil::new(config.cleaning.max_connection_age); - let pending_scrape_valid_until = ValidUntil::new(config.cleaning.max_pending_scrape_age); - let mut access_list_cache = create_access_list_cache(&state.access_list); loop { From 029193b512924c4c10e2d4585df2d811d0a5e942 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 01:20:39 +0100 Subject: [PATCH 46/56] udp: network: check less often if periodic tasks need to be done --- aquatic_udp/src/lib/network.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aquatic_udp/src/lib/network.rs b/aquatic_udp/src/lib/network.rs index c38c311..566ba40 100644 --- a/aquatic_udp/src/lib/network.rs +++ b/aquatic_udp/src/lib/network.rs @@ -188,7 +188,7 @@ pub fn run_socket_worker( ); // Run periodic ValidUntil updates and state cleaning - if iter_counter % 32 == 0 { + if iter_counter % 128 == 0 { let now = Instant::now(); connection_valid_until = From dc841ef0de57a7be5827b4861c980fa72215bd45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 01:23:41 +0100 Subject: [PATCH 47/56] udp: add config setting for network poll timeout --- aquatic_udp/src/lib/config.rs | 2 ++ aquatic_udp/src/lib/network.rs | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/aquatic_udp/src/lib/config.rs b/aquatic_udp/src/lib/config.rs index 978344d..35f2e0a 100644 --- a/aquatic_udp/src/lib/config.rs +++ b/aquatic_udp/src/lib/config.rs @@ -74,6 +74,7 @@ pub struct NetworkConfig { /// $ sudo sysctl -w net.core.rmem_default=104857600 pub socket_recv_buffer_size: usize, pub poll_event_capacity: usize, + pub poll_timeout_ms: u64, } impl Default for NetworkConfig { @@ -83,6 +84,7 @@ impl Default for NetworkConfig { only_ipv6: false, socket_recv_buffer_size: 4096 * 128, poll_event_capacity: 4096, + poll_timeout_ms: 50, } } } diff --git a/aquatic_udp/src/lib/network.rs b/aquatic_udp/src/lib/network.rs index 566ba40..d1727cc 100644 --- 
a/aquatic_udp/src/lib/network.rs +++ b/aquatic_udp/src/lib/network.rs @@ -138,7 +138,7 @@ pub fn run_socket_worker( let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new(); - let timeout = Duration::from_millis(50); + let poll_timeout = Duration::from_millis(config.network.poll_timeout_ms); let connection_cleaning_duration = Duration::from_secs(config.cleaning.connection_cleaning_interval); @@ -154,7 +154,7 @@ pub fn run_socket_worker( let mut iter_counter = 0usize; loop { - poll.poll(&mut events, Some(timeout)) + poll.poll(&mut events, Some(poll_timeout)) .expect("failed polling"); for event in events.iter() { From de52d35357d288a7e0d38d765ddcacbf56a8b5b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 02:45:48 +0100 Subject: [PATCH 48/56] Update TODO --- TODO.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/TODO.md b/TODO.md index aa8d1bf..ed8ba7f 100644 --- a/TODO.md +++ b/TODO.md @@ -17,15 +17,15 @@ * cargo-deny * aquatic_udp + * look at proper cpu pinning (check that one thread gets bound per core) + * then consider so_attach_reuseport_cbpf + * implement statistics for total number of torrents and peers again? + * what poll event capacity is actually needed? + * stagger connection cleaning intervals? * notes * load testing shows that with sharded state, mio reaches 1.4M responses per second with 6 socket and 4 request workers. performance is great overall and faster than without sharding. io_uring impl is a lot behind mio impl with new load tester - * look at proper cpu pinning (check that one thread gets bound per core) - * then consider so_attach_reuseport_cbpf - * what poll event capacity is actually needed? - * mio - * stagger connection cleaning intervals? 
* aquatic_http: * clean out connections regularly From 028a366ce5689f4a4039339548280af4f4fe9a71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 02:45:53 +0100 Subject: [PATCH 49/56] Don't unnecessarily constrain crate versions in Cargo.toml files --- aquatic_cli_helpers/Cargo.toml | 2 +- aquatic_common/Cargo.toml | 2 +- aquatic_http/Cargo.toml | 4 ++-- aquatic_http_load_test/Cargo.toml | 6 +++--- aquatic_http_protocol/Cargo.toml | 8 ++++---- aquatic_udp/Cargo.toml | 6 +++--- aquatic_udp_bench/Cargo.toml | 2 +- aquatic_udp_load_test/Cargo.toml | 8 ++++---- aquatic_udp_protocol/Cargo.toml | 4 ++-- aquatic_ws/Cargo.toml | 8 ++++---- aquatic_ws_load_test/Cargo.toml | 6 +++--- aquatic_ws_protocol/Cargo.toml | 8 ++++---- 12 files changed, 32 insertions(+), 32 deletions(-) diff --git a/aquatic_cli_helpers/Cargo.toml b/aquatic_cli_helpers/Cargo.toml index 20886f2..2ea0f5f 100644 --- a/aquatic_cli_helpers/Cargo.toml +++ b/aquatic_cli_helpers/Cargo.toml @@ -10,5 +10,5 @@ repository = "https://github.com/greatest-ape/aquatic" [dependencies] anyhow = "1" serde = { version = "1", features = ["derive"] } -simplelog = "0.10.0" +simplelog = "0.10" toml = "0.5" diff --git a/aquatic_common/Cargo.toml b/aquatic_common/Cargo.toml index 951b65f..b6dc1e6 100644 --- a/aquatic_common/Cargo.toml +++ b/aquatic_common/Cargo.toml @@ -17,7 +17,7 @@ cpu-pinning = ["hwloc", "libc"] ahash = "0.7" anyhow = "1" arc-swap = "1" -hashbrown = "0.11.2" +hashbrown = "0.11" hex = "0.4" indexmap-amortized = "1" log = "0.4" diff --git a/aquatic_http/Cargo.toml b/aquatic_http/Cargo.toml index 9bc4238..73aff34 100644 --- a/aquatic_http/Cargo.toml +++ b/aquatic_http/Cargo.toml @@ -42,5 +42,5 @@ slab = "0.4" smartstring = "0.2" [dev-dependencies] -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" diff --git a/aquatic_http_load_test/Cargo.toml b/aquatic_http_load_test/Cargo.toml index 889de16..7dd0d5c 100644 --- 
a/aquatic_http_load_test/Cargo.toml +++ b/aquatic_http_load_test/Cargo.toml @@ -18,7 +18,7 @@ aquatic_cli_helpers = "0.1.0" aquatic_common = "0.1.0" aquatic_http_protocol = "0.1.0" futures-lite = "1" -hashbrown = "0.11.2" +hashbrown = "0.11" glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5" } log = "0.4" mimalloc = { version = "0.1", default-features = false } @@ -28,5 +28,5 @@ rustls = { version = "0.20", features = ["dangerous_configuration"] } serde = { version = "1", features = ["derive"] } [dev-dependencies] -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" diff --git a/aquatic_http_protocol/Cargo.toml b/aquatic_http_protocol/Cargo.toml index 6d43207..7343913 100644 --- a/aquatic_http_protocol/Cargo.toml +++ b/aquatic_http_protocol/Cargo.toml @@ -23,7 +23,7 @@ harness = false [dependencies] anyhow = "1" -hashbrown = "0.11.2" +hashbrown = "0.11" hex = { version = "0.4", default-features = false } httparse = "1" itoa = "0.4" @@ -33,10 +33,10 @@ rand = { version = "0.8", features = ["small_rng"] } serde = { version = "1", features = ["derive"] } serde_bencode = "0.2" smartstring = "0.2" -urlencoding = "2.1.0" +urlencoding = "2" [dev-dependencies] bendy = { version = "0.3", features = ["std", "serde"] } criterion = "0.3" -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" diff --git a/aquatic_udp/Cargo.toml b/aquatic_udp/Cargo.toml index ef91567..0df4bae 100644 --- a/aquatic_udp/Cargo.toml +++ b/aquatic_udp/Cargo.toml @@ -33,8 +33,8 @@ rand = { version = "0.8", features = ["small_rng"] } serde = { version = "1", features = ["derive"] } slab = "0.4" signal-hook = { version = "0.3" } -socket2 = { version = "0.4.1", features = ["all"] } +socket2 = { version = "0.4", features = ["all"] } [dev-dependencies] -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" diff --git 
a/aquatic_udp_bench/Cargo.toml b/aquatic_udp_bench/Cargo.toml index e2950df..48a48d0 100644 --- a/aquatic_udp_bench/Cargo.toml +++ b/aquatic_udp_bench/Cargo.toml @@ -15,7 +15,7 @@ aquatic_cli_helpers = "0.1.0" aquatic_udp = "0.1.0" aquatic_udp_protocol = "0.1.0" crossbeam-channel = "0.5" -indicatif = "0.16.2" +indicatif = "0.16" mimalloc = { version = "0.1", default-features = false } num-format = "0.4" rand = { version = "0.8", features = ["small_rng"] } diff --git a/aquatic_udp_load_test/Cargo.toml b/aquatic_udp_load_test/Cargo.toml index 52d39fa..b31850e 100644 --- a/aquatic_udp_load_test/Cargo.toml +++ b/aquatic_udp_load_test/Cargo.toml @@ -17,14 +17,14 @@ anyhow = "1" aquatic_cli_helpers = "0.1.0" aquatic_common = "0.1.0" aquatic_udp_protocol = "0.1.0" -hashbrown = "0.11.2" +hashbrown = "0.11" mimalloc = { version = "0.1", default-features = false } mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] } rand = { version = "0.8", features = ["small_rng"] } rand_distr = "0.4" serde = { version = "1", features = ["derive"] } -socket2 = { version = "0.4.1", features = ["all"] } +socket2 = { version = "0.4", features = ["all"] } [dev-dependencies] -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" diff --git a/aquatic_udp_protocol/Cargo.toml b/aquatic_udp_protocol/Cargo.toml index 3cf3f43..46c8f26 100644 --- a/aquatic_udp_protocol/Cargo.toml +++ b/aquatic_udp_protocol/Cargo.toml @@ -12,5 +12,5 @@ byteorder = "1" either = "1" [dev-dependencies] -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" diff --git a/aquatic_ws/Cargo.toml b/aquatic_ws/Cargo.toml index 2fc27cf..e836dcb 100644 --- a/aquatic_ws/Cargo.toml +++ b/aquatic_ws/Cargo.toml @@ -28,7 +28,7 @@ aquatic_common = "0.1.0" aquatic_ws_protocol = "0.1.0" cfg-if = "1" either = "1" -hashbrown = { version = "0.11.2", features = ["serde"] } +hashbrown = { version = "0.11", features = ["serde"] } log = "0.4" mimalloc = { 
version = "0.1", default-features = false } privdrop = "0.5" @@ -44,7 +44,7 @@ histogram = { version = "0.6", optional = true } mio = { version = "0.7", features = ["tcp", "os-poll", "os-util"], optional = true } native-tls = { version = "0.2", optional = true } parking_lot = { version = "0.11", optional = true } -socket2 = { version = "0.4.1", features = ["all"], optional = true } +socket2 = { version = "0.4", features = ["all"], optional = true } # glommio async-tungstenite = { version = "0.15", optional = true } @@ -55,5 +55,5 @@ glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f rustls-pemfile = { version = "0.2", optional = true } [dev-dependencies] -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" diff --git a/aquatic_ws_load_test/Cargo.toml b/aquatic_ws_load_test/Cargo.toml index e752b9f..700b425 100644 --- a/aquatic_ws_load_test/Cargo.toml +++ b/aquatic_ws_load_test/Cargo.toml @@ -21,7 +21,7 @@ aquatic_ws_protocol = "0.1.0" futures = "0.3" futures-rustls = "0.22" glommio = { git = "https://github.com/DataDog/glommio.git", rev = "4e6b14772da2f4325271fbcf12d24cf91ed466e5" } -hashbrown = { version = "0.11.2", features = ["serde"] } +hashbrown = { version = "0.11", features = ["serde"] } mimalloc = { version = "0.1", default-features = false } rand = { version = "0.8", features = ["small_rng"] } rand_distr = "0.4" @@ -31,5 +31,5 @@ serde_json = "1" tungstenite = "0.15" [dev-dependencies] -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" diff --git a/aquatic_ws_protocol/Cargo.toml b/aquatic_ws_protocol/Cargo.toml index 1830725..b1ec8af 100644 --- a/aquatic_ws_protocol/Cargo.toml +++ b/aquatic_ws_protocol/Cargo.toml @@ -18,13 +18,13 @@ harness = false [dependencies] anyhow = "1" -hashbrown = { version = "0.11.2", features = ["serde"] } +hashbrown = { version = "0.11", features = ["serde"] } serde = { version = "1", features = ["derive"] } serde_json = "1" 
-simd-json = { version = "0.4.7", features = ["allow-non-simd"] } +simd-json = { version = "0.4", features = ["allow-non-simd"] } tungstenite = "0.15" [dev-dependencies] criterion = "0.3" -quickcheck = "1.0" -quickcheck_macros = "1.0" +quickcheck = "1" +quickcheck_macros = "1" From 7305a3532c6b9fdf6c20cbe2d0ace2efff847091 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 02:47:17 +0100 Subject: [PATCH 50/56] Run cargo update --- Cargo.lock | 120 +++++++++++++++++++++++------------------------------ 1 file changed, 52 insertions(+), 68 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d95700c..18be0e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" dependencies = [ "gimli", ] @@ -17,12 +17,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "ahash" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" - [[package]] name = "ahash" version = "0.7.6" @@ -45,9 +39,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.44" +version = "1.0.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61604a8f862e1d5c3229fdd78f8b02c68dcf73a4c4b05fd636d12240aaa242c1" +checksum = "ee10e43ae4a853c0a3591d4e2ada1719e553be18199d9da9d4a83f5927c2f5c7" [[package]] name = "aquatic" @@ -74,7 +68,7 @@ dependencies = [ name = "aquatic_common" version = "0.1.0" dependencies = [ - "ahash 0.7.6", + "ahash", "anyhow", "arc-swap", "hashbrown 0.11.2", @@ -306,9 
+300,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6df5aef5c5830360ce5218cecb8f018af3438af5686ae945094affc86fdec63" +checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" [[package]] name = "arrayvec" @@ -357,9 +351,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.61" +version = "0.3.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" +checksum = "321629d8ba6513061f26707241fa9bc89524ff1cd7a915a97ef0c62c666ce1b6" dependencies = [ "addr2line", "cc", @@ -434,9 +428,9 @@ checksum = "8ff9f338986406db85e2b5deb40a9255b796ca03a194c7457403d215173f3fd5" [[package]] name = "bumpalo" -version = "3.7.1" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9df67f7bf9ef8498769f994239c45613ef0c5899415fb58e9add412d2c1a538" +checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" [[package]] name = "byteorder" @@ -467,9 +461,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.71" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c2681d6594606957bbb8631c4b90a7fcaaa72cdb714743a437b156d6a7eedd" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" [[package]] name = "cfg-if" @@ -765,9 +759,9 @@ dependencies = [ [[package]] name = "float-cmp" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1267f4ac4f343772758f7b1bdcbe767c218bbab93bb432acbf5162bbf85a6c4" +checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" dependencies = [ "num-traits", ] @@ -946,16 +940,16 @@ dependencies = [ [[package]] name = "gimli" -version = "0.25.0" 
+version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" +checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" [[package]] name = "glommio" version = "0.6.0" source = "git+https://github.com/DataDog/glommio.git?rev=4e6b14772da2f4325271fbcf12d24cf91ed466e5#4e6b14772da2f4325271fbcf12d24cf91ed466e5" dependencies = [ - "ahash 0.7.6", + "ahash", "bitflags 1.3.2", "bitmaps", "buddy-alloc", @@ -992,30 +986,20 @@ dependencies = [ [[package]] name = "half" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac5956d4e63858efaec57e0d6c1c2f6a41e1487f830314a324ccd7e2223a7ca0" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "halfbrown" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c12499524b5585419ab2f51545a19b842263a373580a83c0eb98a0142a260a10" +checksum = "3ed39577259d319b81a15176a32673271be2786cb463889703c58c90fe83c825" dependencies = [ - "hashbrown 0.7.2", + "hashbrown 0.11.2", "serde", ] -[[package]] -name = "hashbrown" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96282e96bfcd3da0d3aa9938bedf1e50df3269b6db08b4876d2da0bb1a0841cf" -dependencies = [ - "ahash 0.3.8", - "autocfg", -] - [[package]] name = "hashbrown" version = "0.9.1" @@ -1028,7 +1012,7 @@ version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash 0.7.6", + "ahash", "serde", ] @@ -1121,9 +1105,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"716d3d89f35ac6a34fd0eed635395f4c3b76fa889338a4632e5231a8684216bd" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] @@ -1179,9 +1163,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.103" +version = "0.2.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6" +checksum = "fbe5e23404da5b4f555ef85ebed98fb4083e55a00c317800bc2a50ede9f3d219" [[package]] name = "libm" @@ -1191,9 +1175,9 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libmimalloc-sys" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1b8479c593dba88c2741fc50b92e13dbabbbe0bd504d979f244ccc1a5b1c01" +checksum = "9636c194f9db483f4d0adf2f99a65011a99f904bd222bbd67fb4df4f37863c30" dependencies = [ "cc", ] @@ -1269,9 +1253,9 @@ dependencies = [ [[package]] name = "mimalloc" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb74897ce508e6c49156fd1476fc5922cbc6e75183c65e399c765a09122e5130" +checksum = "cf5f78c1d9892fb5677a8b2f543f967ab891ac0f71feecd961435b74f877283a" dependencies = [ "libmimalloc-sys", ] @@ -1288,9 +1272,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ "libc", "log", @@ -1424,9 +1408,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.26.2" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2" +checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" dependencies = [ "memchr", ] @@ -1471,9 +1455,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-sys" -version = "0.9.70" +version = "0.9.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6517987b3f8226b5da3661dad65ff7f300cc59fb5ea8333ca191fc65fde3edf" +checksum = "7df13d165e607909b363a4757a6f133f8a818a74e9d3a98d09c6128e15fa4c73" dependencies = [ "autocfg", "cc", @@ -1573,9 +1557,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ca011bd0129ff4ae15cd04c4eef202cadf6c51c21e47aba319b4e0501db741" +checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" [[package]] name = "privdrop" @@ -1601,9 +1585,9 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.30" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc3358ebc67bc8b7fa0c007f945b0b18226f78437d61bec735a9eb96b61ee70" +checksum = "ba508cc11742c0dc5c1659771673afbab7a0efab23aa17e854cbab0837ed0b43" dependencies = [ "unicode-xid", ] @@ -1796,9 +1780,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b5ac6078ca424dc1d3ae2328526a76787fecc7f8011f520e3276730e711fc95" +checksum = "dac4581f0fc0e0efd529d069e8189ec7b90b8e7680e21beb35141bdc45f36040" dependencies = [ "log", "ring", @@ -1942,9 +1926,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.68" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8" +checksum = "063bf466a64011ac24040a49009724ee60a57da1b437617ceb32e53ad61bfb19" dependencies = [ "itoa", "ryu", @@ -2069,9 +2053,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "syn" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194" +checksum = "f2afee18b8beb5a596ecb4a2dce128c719b4ba399d34126b9e4396e3f9860966" dependencies = [ "proc-macro2", "quote", @@ -2174,9 +2158,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83b2a3d4d9091d0abd7eba4dc2710b1718583bd4d8992e2190720ea38f391f7" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -2312,9 +2296,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "value-trait" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b637f98040dfa411b01a85b238a8cadbd797b303c23007157dee4bbbd3a72af" +checksum = "0393efdd7d82f856a927b0fcafa80bca45911f5c89ef6b9d80197bebc284f72e" dependencies = [ "float-cmp", "halfbrown", From fee3c9ca4e6203d446760384087c7db90de11435 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 03:01:03 +0100 Subject: [PATCH 51/56] Update mio to version 0.8 --- Cargo.lock | 4 ++-- aquatic_udp/Cargo.toml | 2 +- aquatic_udp_load_test/Cargo.toml | 2 +- aquatic_ws/Cargo.toml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18be0e9..d6f21c3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1272,9 +1272,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.14" +version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2" dependencies = [ "libc", "log", diff --git a/aquatic_udp/Cargo.toml b/aquatic_udp/Cargo.toml index 0df4bae..fb0f9ca 100644 --- a/aquatic_udp/Cargo.toml +++ b/aquatic_udp/Cargo.toml @@ -27,7 +27,7 @@ crossbeam-channel = "0.5" hex = "0.4" log = "0.4" mimalloc = { version = "0.1", default-features = false } -mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] } +mio = { version = "0.8", features = ["net", "os-poll"] } parking_lot = "0.11" rand = { version = "0.8", features = ["small_rng"] } serde = { version = "1", features = ["derive"] } diff --git a/aquatic_udp_load_test/Cargo.toml b/aquatic_udp_load_test/Cargo.toml index b31850e..85dd579 100644 --- a/aquatic_udp_load_test/Cargo.toml +++ b/aquatic_udp_load_test/Cargo.toml @@ -19,7 +19,7 @@ aquatic_common = "0.1.0" aquatic_udp_protocol = "0.1.0" hashbrown = "0.11" mimalloc = { version = "0.1", default-features = false } -mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] } +mio = { version = "0.8", features = ["net", "os-poll"] } rand = { version = "0.8", features = ["small_rng"] } rand_distr = "0.4" serde = { version = "1", features = ["derive"] } diff --git a/aquatic_ws/Cargo.toml b/aquatic_ws/Cargo.toml index e836dcb..7401f95 100644 --- a/aquatic_ws/Cargo.toml +++ b/aquatic_ws/Cargo.toml @@ -41,7 +41,7 @@ tungstenite = "0.15" # mio crossbeam-channel = { version = "0.5", optional = true } histogram = { version = "0.6", optional = true } -mio = { version = "0.7", features = ["tcp", "os-poll", "os-util"], optional = true } +mio = { version = "0.8", features = ["net", "os-poll"], optional = true } native-tls = { version = "0.2", optional = true } parking_lot = { version = "0.11", optional = true } socket2 = { version = "0.4", features = ["all"], optional = true } From 
31e44db469dfd3d0679db1a59967edacf788164f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 03:06:02 +0100 Subject: [PATCH 52/56] Upgrade simplelog to version 0.11 --- Cargo.lock | 4 ++-- aquatic_cli_helpers/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d6f21c3..7fa5c08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1988,9 +1988,9 @@ checksum = "c970da16e7c682fa90a261cf0724dee241c9f7831635ecc4e988ae8f3b505559" [[package]] name = "simplelog" -version = "0.10.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85d04ae642154220ef00ee82c36fb07853c10a4f2a0ca6719f9991211d2eb959" +checksum = "8baa24de25f3092d9697c76f94cf09f67fca13db2ea11ce80c2f055c1aaf0795" dependencies = [ "chrono", "log", diff --git a/aquatic_cli_helpers/Cargo.toml b/aquatic_cli_helpers/Cargo.toml index 2ea0f5f..3d0b745 100644 --- a/aquatic_cli_helpers/Cargo.toml +++ b/aquatic_cli_helpers/Cargo.toml @@ -10,5 +10,5 @@ repository = "https://github.com/greatest-ape/aquatic" [dependencies] anyhow = "1" serde = { version = "1", features = ["derive"] } -simplelog = "0.10" +simplelog = "0.11" toml = "0.5" From 59e95894b929ca1bbddc054d6b48bc38f02c40d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 12:04:16 +0100 Subject: [PATCH 53/56] udp: statistics: show number of torrents and access list len --- TODO.md | 1 - aquatic_common/src/access_list.rs | 4 ++++ aquatic_udp/src/lib/common.rs | 28 ++++++++++++++++++++++++---- aquatic_udp/src/lib/handlers.rs | 17 ++++++++++++++++- aquatic_udp/src/lib/lib.rs | 12 ++++++++++-- aquatic_udp/src/lib/tasks.rs | 22 ++++++++++++++++++++++ aquatic_udp_bench/src/main.rs | 9 ++++++++- 7 files changed, 84 insertions(+), 9 deletions(-) diff --git a/TODO.md b/TODO.md index ed8ba7f..5927ae5 100644 --- a/TODO.md +++ b/TODO.md @@ -19,7 +19,6 @@ * aquatic_udp * look at proper cpu pinning (check 
that one thread gets bound per core) * then consider so_attach_reuseport_cbpf - * implement statistics for total number of torrents and peers again? * what poll event capacity is actually needed? * stagger connection cleaning intervals? * notes diff --git a/aquatic_common/src/access_list.rs b/aquatic_common/src/access_list.rs index 54cfd21..f5a1076 100644 --- a/aquatic_common/src/access_list.rs +++ b/aquatic_common/src/access_list.rs @@ -77,6 +77,10 @@ impl AccessList { AccessListMode::Off => true, } } + + pub fn len(&self) -> usize { + self.0.len() + } } pub trait AccessListQuery { diff --git a/aquatic_udp/src/lib/common.rs b/aquatic_udp/src/lib/common.rs index e9fc24e..41381ed 100644 --- a/aquatic_udp/src/lib/common.rs +++ b/aquatic_udp/src/lib/common.rs @@ -227,12 +227,32 @@ impl TorrentMaps { } } -#[derive(Default)] pub struct Statistics { pub requests_received: AtomicUsize, pub responses_sent: AtomicUsize, pub bytes_received: AtomicUsize, pub bytes_sent: AtomicUsize, + pub torrents_ipv4: Vec, + pub torrents_ipv6: Vec, +} + +impl Statistics { + pub fn new(num_request_workers: usize) -> Self { + Self { + requests_received: Default::default(), + responses_sent: Default::default(), + bytes_received: Default::default(), + bytes_sent: Default::default(), + torrents_ipv4: Self::create_atomic_usize_vec(num_request_workers), + torrents_ipv6: Self::create_atomic_usize_vec(num_request_workers), + } + } + + fn create_atomic_usize_vec(len: usize) -> Vec { + ::std::iter::repeat_with(|| AtomicUsize::default()) + .take(len) + .collect() + } } #[derive(Clone)] @@ -241,11 +261,11 @@ pub struct State { pub statistics: Arc, } -impl Default for State { - fn default() -> Self { +impl State { + pub fn new(num_request_workers: usize) -> Self { Self { access_list: Arc::new(AccessListArcSwap::default()), - statistics: Arc::new(Statistics::default()), + statistics: Arc::new(Statistics::new(num_request_workers)), } } } diff --git a/aquatic_udp/src/lib/handlers.rs 
b/aquatic_udp/src/lib/handlers.rs index 5653290..5cc38f0 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -3,6 +3,7 @@ use std::net::IpAddr; use std::net::Ipv4Addr; use std::net::Ipv6Addr; use std::net::SocketAddr; +use std::sync::atomic::Ordering; use std::time::Duration; use std::time::Instant; @@ -84,6 +85,7 @@ pub fn run_request_worker( state: State, request_receiver: Receiver<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>, response_sender: ConnectedResponseSender, + worker_index: RequestWorkerIndex, ) { let mut torrents = TorrentMaps::default(); let mut small_rng = SmallRng::from_entropy(); @@ -92,9 +94,12 @@ pub fn run_request_worker( let mut peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age); let cleaning_interval = Duration::from_secs(config.cleaning.torrent_cleaning_interval); + let statistics_update_interval = Duration::from_secs(config.statistics.interval); + + let mut last_cleaning = Instant::now(); + let mut last_statistics_update = Instant::now(); let mut iter_counter = 0usize; - let mut last_cleaning = Instant::now(); loop { if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) { @@ -125,6 +130,16 @@ pub fn run_request_worker( last_cleaning = now; } + if !statistics_update_interval.is_zero() + && now > last_statistics_update + statistics_update_interval + { + state.statistics.torrents_ipv4[worker_index.0] + .store(torrents.ipv4.len(), Ordering::SeqCst); + state.statistics.torrents_ipv6[worker_index.0] + .store(torrents.ipv6.len(), Ordering::SeqCst); + + last_statistics_update = now; + } } iter_counter = iter_counter.wrapping_add(1); diff --git a/aquatic_udp/src/lib/lib.rs b/aquatic_udp/src/lib/lib.rs index f42c377..f9ecb93 100644 --- a/aquatic_udp/src/lib/lib.rs +++ b/aquatic_udp/src/lib/lib.rs @@ -23,10 +23,12 @@ use signal_hook::iterator::Signals; use common::{ConnectedRequestSender, ConnectedResponseSender, SocketWorkerIndex, State}; +use 
crate::common::RequestWorkerIndex; + pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker"; pub fn run(config: Config) -> ::anyhow::Result<()> { - let state = State::default(); + let state = State::new(config.request_workers); update_access_list(&config.access_list, &state.access_list)?; @@ -70,7 +72,13 @@ pub fn run(config: Config) -> ::anyhow::Result<()> { WorkerIndex::RequestWorker(i), ); - handlers::run_request_worker(config, state, request_receiver, response_sender) + handlers::run_request_worker( + config, + state, + request_receiver, + response_sender, + RequestWorkerIndex(i), + ) }) .with_context(|| "spawn request worker")?; } diff --git a/aquatic_udp/src/lib/tasks.rs b/aquatic_udp/src/lib/tasks.rs index 8e22560..b175baa 100644 --- a/aquatic_udp/src/lib/tasks.rs +++ b/aquatic_udp/src/lib/tasks.rs @@ -25,6 +25,21 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { let bytes_received_per_second: f64 = bytes_received / interval as f64; let bytes_sent_per_second: f64 = bytes_sent / interval as f64; + let num_torrents_ipv4: usize = state + .statistics + .torrents_ipv4 + .iter() + .map(|n| n.load(Ordering::SeqCst)) + .sum(); + let num_torrents_ipv6: usize = state + .statistics + .torrents_ipv6 + .iter() + .map(|n| n.load(Ordering::SeqCst)) + .sum(); + + let access_list_len = state.access_list.load().len(); + println!( "stats: {:.2} requests/second, {:.2} responses/second", requests_per_second, responses_per_second @@ -36,5 +51,12 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { bytes_sent_per_second * 8.0 / 1_000_000.0, ); + println!( + "ipv4 torrents: {}, ipv6 torrents: {}", + num_torrents_ipv4, num_torrents_ipv6, + ); + + println!("access list entries: {}", access_list_len,); + println!(); } diff --git a/aquatic_udp_bench/src/main.rs b/aquatic_udp_bench/src/main.rs index 6c77bee..c0258e1 100644 --- a/aquatic_udp_bench/src/main.rs +++ b/aquatic_udp_bench/src/main.rs @@ -50,9 +50,16 @@ pub fn run(bench_config: 
BenchConfig) -> ::anyhow::Result<()> { { let config = aquatic_config.clone(); + let state = State::new(config.request_workers); ::std::thread::spawn(move || { - run_request_worker(config, State::default(), request_receiver, response_sender) + run_request_worker( + config, + state, + request_receiver, + response_sender, + RequestWorkerIndex(0), + ) }); } From c78716153b8f48e9d36441de6d7aa442107b6317 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 12:17:39 +0100 Subject: [PATCH 54/56] udp: statistics: show number of peers --- aquatic_udp/src/lib/common.rs | 4 ++++ aquatic_udp/src/lib/handlers.rs | 8 ++++++++ aquatic_udp/src/lib/tasks.rs | 12 +++++++++++- 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/aquatic_udp/src/lib/common.rs b/aquatic_udp/src/lib/common.rs index 41381ed..2a87296 100644 --- a/aquatic_udp/src/lib/common.rs +++ b/aquatic_udp/src/lib/common.rs @@ -234,6 +234,8 @@ pub struct Statistics { pub bytes_sent: AtomicUsize, pub torrents_ipv4: Vec, pub torrents_ipv6: Vec, + pub peers_ipv4: Vec, + pub peers_ipv6: Vec, } impl Statistics { @@ -245,6 +247,8 @@ impl Statistics { bytes_sent: Default::default(), torrents_ipv4: Self::create_atomic_usize_vec(num_request_workers), torrents_ipv6: Self::create_atomic_usize_vec(num_request_workers), + peers_ipv4: Self::create_atomic_usize_vec(num_request_workers), + peers_ipv6: Self::create_atomic_usize_vec(num_request_workers), } } diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index 5cc38f0..f333f2f 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -128,6 +128,14 @@ pub fn run_request_worker( if now > last_cleaning + cleaning_interval { torrents.clean(&config, &state.access_list); + if !statistics_update_interval.is_zero() { + let peers_ipv4 = torrents.ipv4.values().map(|t| t.peers.len()).sum(); + let peers_ipv6 = torrents.ipv6.values().map(|t| t.peers.len()).sum(); + + 
state.statistics.peers_ipv4[worker_index.0].store(peers_ipv4, Ordering::SeqCst); + state.statistics.peers_ipv6[worker_index.0].store(peers_ipv6, Ordering::SeqCst); + } + last_cleaning = now; } if !statistics_update_interval.is_zero() diff --git a/aquatic_udp/src/lib/tasks.rs b/aquatic_udp/src/lib/tasks.rs index b175baa..5ca61ce 100644 --- a/aquatic_udp/src/lib/tasks.rs +++ b/aquatic_udp/src/lib/tasks.rs @@ -1,4 +1,4 @@ -use std::sync::atomic::Ordering; +use std::sync::atomic::{AtomicUsize, Ordering}; use super::common::*; use crate::config::Config; @@ -37,6 +37,8 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { .iter() .map(|n| n.load(Ordering::SeqCst)) .sum(); + let num_peers_ipv4 = sum_atomic_usize_vec(&state.statistics.peers_ipv4); + let num_peers_ipv6 = sum_atomic_usize_vec(&state.statistics.peers_ipv6); let access_list_len = state.access_list.load().len(); @@ -55,8 +57,16 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { "ipv4 torrents: {}, ipv6 torrents: {}", num_torrents_ipv4, num_torrents_ipv6, ); + println!( + "ipv4 peers: {}, ipv6 peers: {} (both updated every {} seconds)", + num_peers_ipv4, num_peers_ipv6, config.cleaning.torrent_cleaning_interval + ); println!("access list entries: {}", access_list_len,); println!(); } + +fn sum_atomic_usize_vec(vec: &Vec) -> usize { + vec.iter().map(|n| n.load(Ordering::SeqCst)).sum() +} From 2c336793b1027235f9523a12b671fd80a3075fef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 12:22:11 +0100 Subject: [PATCH 55/56] udp: statistics: improve atomic usize vec sum code --- aquatic_udp/src/lib/tasks.rs | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/aquatic_udp/src/lib/tasks.rs b/aquatic_udp/src/lib/tasks.rs index 5ca61ce..ffdf1c3 100644 --- a/aquatic_udp/src/lib/tasks.rs +++ b/aquatic_udp/src/lib/tasks.rs @@ -25,20 +25,10 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { 
let bytes_received_per_second: f64 = bytes_received / interval as f64; let bytes_sent_per_second: f64 = bytes_sent / interval as f64; - let num_torrents_ipv4: usize = state - .statistics - .torrents_ipv4 - .iter() - .map(|n| n.load(Ordering::SeqCst)) - .sum(); - let num_torrents_ipv6: usize = state - .statistics - .torrents_ipv6 - .iter() - .map(|n| n.load(Ordering::SeqCst)) - .sum(); - let num_peers_ipv4 = sum_atomic_usize_vec(&state.statistics.peers_ipv4); - let num_peers_ipv6 = sum_atomic_usize_vec(&state.statistics.peers_ipv6); + let num_torrents_ipv4: usize = sum_atomic_usizes(&state.statistics.torrents_ipv4); + let num_torrents_ipv6 = sum_atomic_usizes(&state.statistics.torrents_ipv6); + let num_peers_ipv4 = sum_atomic_usizes(&state.statistics.peers_ipv4); + let num_peers_ipv6 = sum_atomic_usizes(&state.statistics.peers_ipv6); let access_list_len = state.access_list.load().len(); @@ -67,6 +57,6 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { println!(); } -fn sum_atomic_usize_vec(vec: &Vec) -> usize { - vec.iter().map(|n| n.load(Ordering::SeqCst)).sum() +fn sum_atomic_usizes(values: &[AtomicUsize]) -> usize { + values.iter().map(|n| n.load(Ordering::SeqCst)).sum() } From a1243c59d6d1d173171d0457a0a1879e5dd6dfca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Joakim=20Frosteg=C3=A5rd?= Date: Fri, 19 Nov 2021 12:30:05 +0100 Subject: [PATCH 56/56] udp: avoid Ordering::SeqCst for atomic operations --- aquatic_udp/src/lib/handlers.rs | 10 ++++++---- aquatic_udp/src/lib/network.rs | 8 ++++---- aquatic_udp/src/lib/tasks.rs | 10 +++++----- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/aquatic_udp/src/lib/handlers.rs b/aquatic_udp/src/lib/handlers.rs index f333f2f..0bc85bb 100644 --- a/aquatic_udp/src/lib/handlers.rs +++ b/aquatic_udp/src/lib/handlers.rs @@ -132,8 +132,10 @@ pub fn run_request_worker( let peers_ipv4 = torrents.ipv4.values().map(|t| t.peers.len()).sum(); let peers_ipv6 = torrents.ipv6.values().map(|t| 
t.peers.len()).sum(); - state.statistics.peers_ipv4[worker_index.0].store(peers_ipv4, Ordering::SeqCst); - state.statistics.peers_ipv6[worker_index.0].store(peers_ipv6, Ordering::SeqCst); + state.statistics.peers_ipv4[worker_index.0] + .store(peers_ipv4, Ordering::Release); + state.statistics.peers_ipv6[worker_index.0] + .store(peers_ipv6, Ordering::Release); } last_cleaning = now; @@ -142,9 +144,9 @@ pub fn run_request_worker( && now > last_statistics_update + statistics_update_interval { state.statistics.torrents_ipv4[worker_index.0] - .store(torrents.ipv4.len(), Ordering::SeqCst); + .store(torrents.ipv4.len(), Ordering::Release); state.statistics.torrents_ipv6[worker_index.0] - .store(torrents.ipv6.len(), Ordering::SeqCst); + .store(torrents.ipv6.len(), Ordering::Release); last_statistics_update = now; } diff --git a/aquatic_udp/src/lib/network.rs b/aquatic_udp/src/lib/network.rs index d1727cc..af1f56b 100644 --- a/aquatic_udp/src/lib/network.rs +++ b/aquatic_udp/src/lib/network.rs @@ -287,11 +287,11 @@ fn read_requests( state .statistics .requests_received - .fetch_add(requests_received, Ordering::SeqCst); + .fetch_add(requests_received, Ordering::Release); state .statistics .bytes_received - .fetch_add(bytes_received, Ordering::SeqCst); + .fetch_add(bytes_received, Ordering::Release); } } @@ -451,11 +451,11 @@ fn send_responses( state .statistics .responses_sent - .fetch_add(responses_sent, Ordering::SeqCst); + .fetch_add(responses_sent, Ordering::Release); state .statistics .bytes_sent - .fetch_add(bytes_sent, Ordering::SeqCst); + .fetch_add(bytes_sent, Ordering::Release); } } diff --git a/aquatic_udp/src/lib/tasks.rs b/aquatic_udp/src/lib/tasks.rs index ffdf1c3..6624a5d 100644 --- a/aquatic_udp/src/lib/tasks.rs +++ b/aquatic_udp/src/lib/tasks.rs @@ -9,16 +9,16 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { let requests_received: f64 = state .statistics .requests_received - .fetch_and(0, Ordering::SeqCst) as f64; + .fetch_and(0, 
Ordering::AcqRel) as f64; let responses_sent: f64 = state .statistics .responses_sent - .fetch_and(0, Ordering::SeqCst) as f64; + .fetch_and(0, Ordering::AcqRel) as f64; let bytes_received: f64 = state .statistics .bytes_received - .fetch_and(0, Ordering::SeqCst) as f64; - let bytes_sent: f64 = state.statistics.bytes_sent.fetch_and(0, Ordering::SeqCst) as f64; + .fetch_and(0, Ordering::AcqRel) as f64; + let bytes_sent: f64 = state.statistics.bytes_sent.fetch_and(0, Ordering::AcqRel) as f64; let requests_per_second = requests_received / interval as f64; let responses_per_second: f64 = responses_sent / interval as f64; @@ -58,5 +58,5 @@ pub fn gather_and_print_statistics(state: &State, config: &Config) { } fn sum_atomic_usizes(values: &[AtomicUsize]) -> usize { - values.iter().map(|n| n.load(Ordering::SeqCst)).sum() + values.iter().map(|n| n.load(Ordering::Acquire)).sum() }