rename aquatic to aquatic_udp, same for bench and load test crates

This commit is contained in:
Joakim Frostegård 2020-05-11 16:55:46 +02:00
parent f614bab03d
commit 1b8d74e26d
35 changed files with 53 additions and 53 deletions

32
aquatic_udp/Cargo.toml Normal file
View file

@ -0,0 +1,32 @@
# Manifest for the UDP BitTorrent tracker crate.
[package]
name = "aquatic_udp"
version = "0.1.0"
authors = ["Joakim Frostegård <joakim.frostegard@gmail.com>"]
edition = "2018"
license = "Apache-2.0"

# Library code lives at a non-default path.
[lib]
name = "aquatic_udp"
path = "src/lib/lib.rs"

# Binary entry point (path defaults to src/main.rs).
[[bin]]
name = "aquatic_udp"

[dependencies]
# UDP tracker protocol types and (de)serialization — workspace crate.
bittorrent_udp = { path = "../bittorrent_udp" }
# Shared CLI / config-file boilerplate — workspace crate.
cli_helpers = { path = "../cli_helpers" }
crossbeam-channel = "0.4" # channels between socket and request workers
hashbrown = "0.7" # HashMap implementation used for torrent/connection maps
histogram = "0.6" # peers-per-torrent statistics
indexmap = "1" # order-indexed map, enables indexed random peer selection
mimalloc = { version = "0.1", default-features = false } # global allocator
mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] } # socket polling
net2 = "0.2" # socket options (reuse_port, recv buffer size)
parking_lot = "0.10" # mutexes around shared state
privdrop = "0.3" # chroot / user switching after socket bind
rand = { version = "0.7", features = ["small_rng"] } # connection ids, peer selection
serde = { version = "1", features = ["derive"] } # config (de)serialization

[dev-dependencies]
quickcheck = "0.9"
quickcheck_macros = "0.9"

View file

@ -0,0 +1,14 @@
use aquatic_udp;
use cli_helpers;

// Use mimalloc as the global allocator for the whole binary.
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

/// Binary entry point: let the shared CLI helper parse arguments and the
/// config file, then hand control to the library's `run` function.
fn main() {
    type AppConfig = aquatic_udp::config::Config;

    cli_helpers::run_app_with_cli_and_config::<AppConfig>(
        "aquatic: udp bittorrent tracker",
        aquatic_udp::run,
    )
}

View file

@ -0,0 +1,170 @@
use std::net::{SocketAddr, IpAddr};
use std::sync::{Arc, atomic::AtomicUsize};
use std::time::{Duration, Instant};
use hashbrown::HashMap;
use indexmap::IndexMap;
use parking_lot::Mutex;
pub use bittorrent_udp::types::*;
/// Largest UDP datagram (in bytes) read from or written to the socket.
pub const MAX_PACKET_SIZE: usize = 4096;

/// Absolute point in time after which a peer or connection is stale.
///
/// An expiry instant is stored instead of a "last seen" timestamp to
/// hopefully avoid arithmetic overflow when cleaning.
#[derive(Debug, Clone, Copy)]
pub struct ValidUntil(pub Instant);

impl ValidUntil {
    /// Create an expiry `offset_seconds` from now.
    pub fn new(offset_seconds: u64) -> Self {
        let expiry = Instant::now() + Duration::from_secs(offset_seconds);

        ValidUntil(expiry)
    }
}
/// Identifies one established client "connection": the server-issued
/// connection id plus the client's socket address.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ConnectionKey {
    pub connection_id: ConnectionId,
    pub socket_addr: SocketAddr
}

/// Active connections, each mapped to its expiry time.
pub type ConnectionMap = HashMap<ConnectionKey, ValidUntil>;
/// A peer's relation to a torrent, derived from its announce request.
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum PeerStatus {
    Seeding,
    Leeching,
    Stopped
}

impl PeerStatus {
    /// Classify a peer from its announce event and remaining byte count.
    ///
    /// A stop event always wins; otherwise zero bytes left means the
    /// peer is seeding. The final (leeching) branch is expected to be
    /// taken most of the time.
    #[inline]
    pub fn from_event_and_bytes_left(
        event: AnnounceEvent,
        bytes_left: NumberOfBytes
    ) -> Self {
        if event == AnnounceEvent::Stopped {
            return Self::Stopped;
        }

        if bytes_left.0 == 0 {
            Self::Seeding
        } else {
            Self::Leeching
        }
    }
}
/// State tracked for a single peer of a single torrent.
#[derive(Clone, Debug)]
pub struct Peer {
    pub ip_address: IpAddr,
    pub port: Port,
    pub status: PeerStatus,
    // Refreshed on every announce; checked by the cleaning task.
    pub valid_until: ValidUntil
}
impl Peer {
    /// Convert to the wire-format peer entry used in announce responses
    /// (address and port only).
    #[inline(always)]
    pub fn to_response_peer(&self) -> ResponsePeer {
        let ip_address = self.ip_address;
        let port = self.port;

        ResponsePeer { ip_address, port }
    }
}
/// Identifies a peer within a torrent's peer map: the announced peer id
/// plus the source IP address.
#[derive(PartialEq, Eq, Hash, Clone)]
pub struct PeerMapKey {
    pub ip: IpAddr,
    pub peer_id: PeerId
}

/// Insertion-ordered peer map; indexed access enables the half-random
/// peer selection in the announce handler.
pub type PeerMap = IndexMap<PeerMapKey, Peer>;
pub struct TorrentData {
pub peers: PeerMap,
pub num_seeders: usize,
pub num_leechers: usize,
}
impl Default for TorrentData {
fn default() -> Self {
Self {
peers: IndexMap::new(),
num_seeders: 0,
num_leechers: 0,
}
}
}
pub type TorrentMap = HashMap<InfoHash, TorrentData>;
/// Shared atomic counters; read-and-reset by the statistics task.
#[derive(Default)]
pub struct Statistics {
    pub requests_received: AtomicUsize,
    pub responses_sent: AtomicUsize,
    // Number of times a socket worker woke up with a readable socket.
    pub readable_events: AtomicUsize,
    pub bytes_received: AtomicUsize,
    pub bytes_sent: AtomicUsize,
}
/// Shared application state; cloning only bumps the `Arc` refcounts, so
/// it is cheap to hand a copy to every worker thread.
#[derive(Clone)]
pub struct State {
    pub connections: Arc<Mutex<ConnectionMap>>,
    pub torrents: Arc<Mutex<TorrentMap>>,
    pub statistics: Arc<Statistics>,
}
impl State {
pub fn new() -> Self {
Self {
connections: Arc::new(Mutex::new(HashMap::new())),
torrents: Arc::new(Mutex::new(HashMap::new())),
statistics: Arc::new(Statistics::default()),
}
}
}
#[cfg(test)]
mod tests {
    // Exhaustively checks classification for every announce event at the
    // zero / non-zero bytes-left boundary.
    #[test]
    fn test_peer_status_from_event_and_bytes_left(){
        use crate::common::*;
        use PeerStatus::*;

        let f = PeerStatus::from_event_and_bytes_left;

        // A stop event wins regardless of bytes left.
        assert_eq!(Stopped, f(AnnounceEvent::Stopped, NumberOfBytes(0)));
        assert_eq!(Stopped, f(AnnounceEvent::Stopped, NumberOfBytes(1)));
        // Otherwise: zero bytes left => seeding, non-zero => leeching.
        assert_eq!(Seeding, f(AnnounceEvent::Started, NumberOfBytes(0)));
        assert_eq!(Leeching, f(AnnounceEvent::Started, NumberOfBytes(1)));
        assert_eq!(Seeding, f(AnnounceEvent::Completed, NumberOfBytes(0)));
        assert_eq!(Leeching, f(AnnounceEvent::Completed, NumberOfBytes(1)));
        assert_eq!(Seeding, f(AnnounceEvent::None, NumberOfBytes(0)));
        assert_eq!(Leeching, f(AnnounceEvent::None, NumberOfBytes(1)));
    }
}

View file

@ -0,0 +1,163 @@
use std::net::SocketAddr;
use serde::{Serialize, Deserialize};
/// Top-level application configuration, deserialized from the config
/// file; every field falls back to its `Default` value when absent.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
    /// Socket workers receive requests from the socket, parse them and send
    /// them on to the request workers. They then receive responses from the
    /// request workers, encode them and send them back over the socket.
    pub socket_workers: usize,
    /// Request workers receive a number of requests from socket workers,
    /// generate responses and send them back to the socket workers.
    pub request_workers: usize,
    pub network: NetworkConfig,
    pub handlers: HandlerConfig,
    pub statistics: StatisticsConfig,
    pub cleaning: CleaningConfig,
    pub privileges: PrivilegeConfig,
}
/// Socket and protocol settings.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct NetworkConfig {
    /// Bind to this address
    pub address: SocketAddr,
    /// Maximum number of torrents to accept in scrape request
    pub max_scrape_torrents: u8,
    /// Maximum number of peers to return in announce response
    pub max_response_peers: usize,
    /// Ask peers to announce this often (seconds)
    pub peer_announce_interval: i32,
    /// Size of socket recv buffer. Use 0 for OS default.
    ///
    /// This setting can have a big impact on dropped packages. It might
    /// require changing system defaults. Some examples of commands to set
    /// recommended values for different operating systems:
    ///
    /// macOS:
    /// $ sudo sysctl net.inet.udp.recvspace=6000000
    /// $ sudo sysctl net.inet.udp.maxdgram=500000 # Not necessary, but recommended
    /// $ sudo sysctl kern.ipc.maxsockbuf=8388608 # Not necessary, but recommended
    ///
    /// Linux:
    /// $ sudo sysctl -w net.core.rmem_max=104857600
    /// $ sudo sysctl -w net.core.rmem_default=104857600
    pub socket_recv_buffer_size: usize,
    /// Capacity of the event buffer each socket worker passes to poll
    pub poll_event_capacity: usize,
}
/// Request worker batching settings.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct HandlerConfig {
    /// Maximum number of requests to receive from channel before locking
    /// mutex and starting work
    pub max_requests_per_iter: usize,
    /// How long to wait for another request before considering the
    /// current batch complete (microseconds)
    pub channel_recv_timeout_microseconds: u64,
}
/// Statistics printing settings.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct StatisticsConfig {
    /// Print statistics this often (seconds). Don't print when set to zero.
    pub interval: u64,
}
/// Periodic cleaning settings.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct CleaningConfig {
    /// Clean torrents and connections this often (seconds)
    pub interval: u64,
    /// Remove peers that haven't announced for this long (seconds)
    pub max_peer_age: u64,
    /// Remove connections that are older than this (seconds)
    pub max_connection_age: u64,
}
/// Privilege-dropping settings (applied after sockets are bound).
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct PrivilegeConfig {
    /// Chroot and switch user after binding to sockets
    pub drop_privileges: bool,
    /// Chroot to this path
    pub chroot_path: String,
    /// User to switch to after chrooting
    pub user: String,
}
impl Default for Config {
fn default() -> Self {
Self {
socket_workers: 1,
request_workers: 1,
network: NetworkConfig::default(),
handlers: HandlerConfig::default(),
statistics: StatisticsConfig::default(),
cleaning: CleaningConfig::default(),
privileges: PrivilegeConfig::default(),
}
}
}
impl Default for NetworkConfig {
fn default() -> Self {
Self {
address: SocketAddr::from(([127, 0, 0, 1], 3000)),
max_scrape_torrents: 255,
max_response_peers: 255,
peer_announce_interval: 60 * 15,
poll_event_capacity: 4096,
socket_recv_buffer_size: 4096 * 128,
}
}
}
impl Default for HandlerConfig {
fn default() -> Self {
Self {
max_requests_per_iter: 10000,
channel_recv_timeout_microseconds: 200,
}
}
}
impl Default for StatisticsConfig {
fn default() -> Self {
Self {
interval: 5,
}
}
}
impl Default for CleaningConfig {
fn default() -> Self {
Self {
interval: 30,
max_peer_age: 60 * 20,
max_connection_age: 60 * 5,
}
}
}
impl Default for PrivilegeConfig {
fn default() -> Self {
Self {
drop_privileges: false,
chroot_path: ".".to_string(),
user: "nobody".to_string(),
}
}
}

View file

@ -0,0 +1,477 @@
use std::net::SocketAddr;
use std::time::Duration;
use std::vec::Drain;
use crossbeam_channel::{Sender, Receiver};
use indexmap::IndexMap;
use parking_lot::MutexGuard;
use rand::{SeedableRng, Rng, rngs::{SmallRng, StdRng}};
use bittorrent_udp::types::*;
use crate::common::*;
use crate::config::Config;
/// Request worker loop: drain a batch of requests from the channel,
/// validate and handle them under the appropriate mutex, then send the
/// responses back towards the socket workers.
pub fn run_request_worker(
    state: State,
    config: Config,
    request_receiver: Receiver<(Request, SocketAddr)>,
    response_sender: Sender<(Response, SocketAddr)>,
){
    // Batch buffers, reused across iterations (drained, never dropped).
    let mut connect_requests: Vec<(ConnectRequest, SocketAddr)> = Vec::new();
    let mut announce_requests: Vec<(AnnounceRequest, SocketAddr)> = Vec::new();
    let mut scrape_requests: Vec<(ScrapeRequest, SocketAddr)> = Vec::new();
    let mut responses: Vec<(Response, SocketAddr)> = Vec::new();

    // StdRng for connection ids; a cheaper SmallRng, seeded from it,
    // for peer selection.
    let mut std_rng = StdRng::from_entropy();
    let mut small_rng = SmallRng::from_rng(&mut std_rng).unwrap();

    let timeout = Duration::from_micros(
        config.handlers.channel_recv_timeout_microseconds
    );

    loop {
        let mut opt_connections = None;

        // Collect requests from channel, divide them by type
        //
        // Collect a maximum number of requests. Stop collecting before that
        // number is reached if having waited for too long for a request, but
        // only if ConnectionMap mutex isn't locked.
        for i in 0..config.handlers.max_requests_per_iter {
            let (request, src): (Request, SocketAddr) = if i == 0 {
                // Block indefinitely for the first request of a batch.
                match request_receiver.recv(){
                    Ok(r) => r,
                    Err(_) => break, // Really shouldn't happen
                }
            } else {
                match request_receiver.recv_timeout(timeout){
                    Ok(r) => r,
                    Err(_) => {
                        // Timed out: end the batch early if the connection
                        // mutex is free, keeping the guard for reuse below.
                        if let Some(guard) = state.connections.try_lock(){
                            opt_connections = Some(guard);

                            break
                        } else {
                            continue
                        }
                    },
                }
            };

            match request {
                Request::Connect(r) => {
                    connect_requests.push((r, src))
                },
                Request::Announce(r) => {
                    announce_requests.push((r, src))
                },
                Request::Scrape(r) => {
                    scrape_requests.push((r, src))
                },
            }
        }

        // Take the connection map lock unless it was already acquired above.
        let mut connections: MutexGuard<ConnectionMap> = opt_connections.unwrap_or_else(||
            state.connections.lock()
        );

        handle_connect_requests(
            &config,
            &mut connections,
            &mut std_rng,
            connect_requests.drain(..),
            &mut responses
        );

        // Drop announce/scrape requests whose connection id wasn't issued
        // to that exact address (or has expired), answering each with an
        // error response instead.
        announce_requests.retain(|(request, src)| {
            let connection_key = ConnectionKey {
                connection_id: request.connection_id,
                socket_addr: *src,
            };

            if connections.contains_key(&connection_key){
                true
            } else {
                let response = ErrorResponse {
                    transaction_id: request.transaction_id,
                    message: "Connection invalid or expired".to_string()
                };

                responses.push((response.into(), *src));

                false
            }
        });

        scrape_requests.retain(|(request, src)| {
            let connection_key = ConnectionKey {
                connection_id: request.connection_id,
                socket_addr: *src,
            };

            if connections.contains_key(&connection_key){
                true
            } else {
                let response = ErrorResponse {
                    transaction_id: request.transaction_id,
                    message: "Connection invalid or expired".to_string()
                };

                responses.push((response.into(), *src));

                false
            }
        });

        // Release the connection mutex before taking the torrent mutex.
        ::std::mem::drop(connections);

        if !(announce_requests.is_empty() && scrape_requests.is_empty()){
            let mut torrents = state.torrents.lock();

            handle_announce_requests(
                &config,
                &mut torrents,
                &mut small_rng,
                announce_requests.drain(..),
                &mut responses
            );

            handle_scrape_requests(
                &mut torrents,
                scrape_requests.drain(..),
                &mut responses
            );
        }

        for r in responses.drain(..){
            if let Err(err) = response_sender.send(r){
                eprintln!("error sending response to channel: {}", err);
            }
        }
    }
}
/// Handle connect requests: issue a fresh random connection id for each,
/// record it in the connection map, and queue a connect response.
#[inline]
pub fn handle_connect_requests(
    config: &Config,
    connections: &mut MutexGuard<ConnectionMap>,
    rng: &mut StdRng,
    requests: Drain<(ConnectRequest, SocketAddr)>,
    responses: &mut Vec<(Response, SocketAddr)>,
){
    // All connections created in this batch share one expiry instant.
    let valid_until = ValidUntil::new(config.cleaning.max_connection_age);

    for (request, src) in requests {
        let connection_id = ConnectionId(rng.gen());

        let key = ConnectionKey {
            connection_id,
            socket_addr: src,
        };

        connections.insert(key, valid_until);

        let response = ConnectResponse {
            connection_id,
            transaction_id: request.transaction_id,
        };

        responses.push((Response::Connect(response), src));
    }
}
/// Handle announce requests: upsert the announcing peer, keep the cached
/// seeder/leecher counts consistent, and reply with a peer selection.
///
/// Connection ids must already have been validated by the caller.
#[inline]
pub fn handle_announce_requests(
    config: &Config,
    torrents: &mut MutexGuard<TorrentMap>,
    rng: &mut SmallRng,
    requests: Drain<(AnnounceRequest, SocketAddr)>,
    responses: &mut Vec<(Response, SocketAddr)>,
){
    // Shared expiry for all peers updated in this batch.
    let peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age);

    responses.extend(requests.map(|(request, src)| {
        // Use the packet's source address, not anything client-supplied.
        let peer_ip = src.ip();

        let peer_key = PeerMapKey {
            ip: peer_ip,
            peer_id: request.peer_id,
        };

        let peer_status = PeerStatus::from_event_and_bytes_left(
            request.event,
            request.bytes_left
        );

        let peer = Peer {
            ip_address: peer_ip,
            port: request.port,
            status: peer_status,
            valid_until: peer_valid_until,
        };

        let torrent_data = torrents
            .entry(request.info_hash)
            .or_default();

        // Insert or remove the peer. `insert` returns any previous entry
        // under the same key, whose count contribution is reverted below,
        // keeping the cached counters consistent on re-announces and
        // status changes.
        let opt_removed_peer = match peer_status {
            PeerStatus::Leeching => {
                torrent_data.num_leechers += 1;

                torrent_data.peers.insert(peer_key, peer)
            },
            PeerStatus::Seeding => {
                torrent_data.num_seeders += 1;

                torrent_data.peers.insert(peer_key, peer)
            },
            PeerStatus::Stopped => {
                torrent_data.peers.remove(&peer_key)
            }
        };

        match opt_removed_peer.map(|peer| peer.status){
            Some(PeerStatus::Leeching) => {
                torrent_data.num_leechers -= 1;
            },
            Some(PeerStatus::Seeding) => {
                torrent_data.num_seeders -= 1;
            },
            _ => {}
        }

        let max_num_peers_to_take = calc_max_num_peers_to_take(
            config,
            request.peers_wanted.0
        );

        let response_peers = extract_response_peers(
            rng,
            &torrent_data.peers,
            max_num_peers_to_take,
            Peer::to_response_peer
        );

        let response = Response::Announce(AnnounceResponse {
            transaction_id: request.transaction_id,
            announce_interval: AnnounceInterval(config.network.peer_announce_interval),
            leechers: NumberOfPeers(torrent_data.num_leechers as i32),
            seeders: NumberOfPeers(torrent_data.num_seeders as i32),
            peers: response_peers
        });

        (response, src)
    }));
}
/// Handle scrape requests: answer with cached seeder/leecher counts for
/// each requested info hash, in request order.
#[inline]
pub fn handle_scrape_requests(
    torrents: &mut MutexGuard<TorrentMap>,
    requests: Drain<(ScrapeRequest, SocketAddr)>,
    responses: &mut Vec<(Response, SocketAddr)>,
){
    // Reported for info hashes we aren't currently tracking.
    let empty_stats = create_torrent_scrape_statistics(0, 0);

    responses.extend(requests.map(|(request, src)| {
        let torrent_stats: Vec<TorrentScrapeStatistics> = request
            .info_hashes
            .iter()
            .map(|info_hash| {
                torrents
                    .get(info_hash)
                    .map(|torrent_data| create_torrent_scrape_statistics(
                        torrent_data.num_seeders as i32,
                        torrent_data.num_leechers as i32,
                    ))
                    .unwrap_or(empty_stats)
            })
            .collect();

        let response = ScrapeResponse {
            transaction_id: request.transaction_id,
            torrent_stats,
        };

        (Response::Scrape(response), src)
    }));
}
/// Extract response peers
///
/// If there are more peers in map than `max_num_peers_to_take`, do a
/// half-random selection of peers from first and second halves of map,
/// in order to avoid returning too homogeneous peers.
///
/// Don't care if we send back announcing peer.
#[inline]
pub fn extract_response_peers<K, V, R, F>(
    rng: &mut impl Rng,
    peer_map: &IndexMap<K, V>,
    max_num_peers_to_take: usize,
    peer_conversion_function: F
) -> Vec<R>
where
    K: Eq + ::std::hash::Hash,
    F: Fn(&V) -> R
{
    let peer_map_len = peer_map.len();

    if peer_map_len <= max_num_peers_to_take {
        // Few enough peers: return all of them.
        peer_map.values()
            .map(peer_conversion_function)
            .collect()
    } else {
        let half_num_to_take = max_num_peers_to_take / 2;
        let half_peer_map_len = peer_map_len / 2;

        // Pick a random window start within each half of the map. The
        // exclusive upper bounds ensure each window fits inside its half
        // and that the windows can't overlap: the first window ends at
        // ceil(len/2) - 1 at the latest, while the second starts at
        // floor(len/2) at the earliest. (rand 0.7 gen_range(low, high)
        // takes an exclusive upper bound.)
        let offset_first_half = rng.gen_range(
            0,
            (half_peer_map_len + (peer_map_len % 2)) - half_num_to_take
        );
        let offset_second_half = rng.gen_range(
            half_peer_map_len,
            peer_map_len - half_num_to_take
        );

        let end_first_half = offset_first_half + half_num_to_take;
        // The second window also takes the leftover peer when
        // max_num_peers_to_take is odd.
        let end_second_half = offset_second_half + half_num_to_take + (max_num_peers_to_take % 2);

        let mut peers: Vec<R> = Vec::with_capacity(max_num_peers_to_take);

        for i in offset_first_half..end_first_half {
            if let Some((_, peer)) = peer_map.get_index(i){
                peers.push(peer_conversion_function(peer))
            }
        }
        for i in offset_second_half..end_second_half {
            if let Some((_, peer)) = peer_map.get_index(i){
                peers.push(peer_conversion_function(peer))
            }
        }

        debug_assert_eq!(peers.len(), max_num_peers_to_take);

        peers
    }
}
/// Number of peers to include in an announce response: the amount the
/// client asked for, capped by the configured maximum. A non-positive
/// request (the protocol uses negative values for "no preference")
/// yields the configured maximum.
///
/// Cleanups: `max_response_peers` is already `usize`, so the previous
/// `as usize` casts on it were redundant, and `usize::min` replaces the
/// verbose `::std::cmp::min` call.
#[inline]
fn calc_max_num_peers_to_take(
    config: &Config,
    peers_wanted: i32,
) -> usize {
    let max_peers = config.network.max_response_peers;

    if peers_wanted <= 0 {
        max_peers
    } else {
        max_peers.min(peers_wanted as usize)
    }
}
/// Build a scrape statistics entry from cached counts.
#[inline(always)]
pub fn create_torrent_scrape_statistics(
    seeders: i32,
    leechers: i32
) -> TorrentScrapeStatistics {
    let seeders = NumberOfPeers(seeders);
    let leechers = NumberOfPeers(leechers);

    TorrentScrapeStatistics {
        seeders,
        completed: NumberOfDownloads(0), // No implementation planned
        leechers
    }
}
#[cfg(test)]
mod tests {
    use std::net::IpAddr;
    use std::collections::HashSet;

    use indexmap::IndexMap;
    use rand::thread_rng;
    use quickcheck::{TestResult, quickcheck};

    use super::*;

    // Build a unique (key, value) pair whose IP address encodes `i`, so
    // that uniqueness of returned peers can be checked via addresses.
    fn gen_peer_map_key_and_value(i: u32) -> (PeerMapKey, Peer) {
        let ip_address = IpAddr::from(i.to_be_bytes());
        let peer_id = PeerId([0; 20]);

        let key = PeerMapKey {
            ip: ip_address,
            peer_id,
        };
        let value = Peer {
            ip_address,
            port: Port(1),
            status: PeerStatus::Leeching,
            valid_until: ValidUntil::new(0),
        };

        (key, value)
    }

    // Property: extract_response_peers never returns more peers than
    // requested, returns every peer when enough were requested, and
    // never returns the same peer twice.
    #[test]
    fn test_extract_response_peers(){
        fn prop(data: (u32, u16)) -> TestResult {
            let gen_num_peers = data.0;
            let req_num_peers = data.1 as usize;

            let mut peer_map: PeerMap = IndexMap::new();

            for i in 0..gen_num_peers {
                let (key, value) = gen_peer_map_key_and_value(i);

                peer_map.insert(key, value);
            }

            let mut rng = thread_rng();

            let peers = extract_response_peers(
                &mut rng,
                &peer_map,
                req_num_peers,
                Peer::to_response_peer
            );

            // Check that number of returned peers is correct
            let mut success = peers.len() <= req_num_peers;

            if req_num_peers >= gen_num_peers as usize {
                success &= peers.len() == gen_num_peers as usize;
            }

            // Check that returned peers are unique (no overlap)
            let mut ip_addresses = HashSet::new();

            for peer in peers {
                if ip_addresses.contains(&peer.ip_address){
                    success = false;

                    break;
                }

                ip_addresses.insert(peer.ip_address);
            }

            TestResult::from_bool(success)
        }

        quickcheck(prop as fn((u32, u16)) -> TestResult);
    }
}

View file

@ -0,0 +1,99 @@
use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
use std::time::Duration;
use std::thread::Builder;
use crossbeam_channel::unbounded;
use privdrop::PrivDrop;
pub mod common;
pub mod config;
pub mod handlers;
pub mod network;
pub mod tasks;
use config::Config;
use common::State;
/// Run the tracker: spawn request and socket workers, optionally start
/// the statistics thread and drop privileges, then use the main thread
/// as the periodic cleaning loop (never returns).
pub fn run(config: Config){
    let state = State::new();

    // Requests flow socket workers -> request workers; responses flow
    // back through the second channel.
    let (request_sender, request_receiver) = unbounded();
    let (response_sender, response_receiver) = unbounded();

    for i in 0..config.request_workers {
        let state = state.clone();
        let config = config.clone();
        let request_receiver = request_receiver.clone();
        let response_sender = response_sender.clone();

        Builder::new().name(format!("request-{:02}", i + 1)).spawn(move ||
            handlers::run_request_worker(
                state,
                config,
                request_receiver,
                response_sender
            )
        ).expect("spawn request worker");
    }

    // Socket workers increment this once bound; privilege dropping below
    // waits until all binds have succeeded.
    let num_bound_sockets = Arc::new(AtomicUsize::new(0));

    for i in 0..config.socket_workers {
        let state = state.clone();
        let config = config.clone();
        let request_sender = request_sender.clone();
        let response_receiver = response_receiver.clone();
        let num_bound_sockets = num_bound_sockets.clone();

        Builder::new().name(format!("socket-{:02}", i + 1)).spawn(move ||
            network::run_socket_worker(
                state,
                config,
                i,
                request_sender,
                response_receiver,
                num_bound_sockets,
            )
        ).expect("spawn socket worker");
    }

    // Periodic statistics printing; disabled when interval is zero.
    if config.statistics.interval != 0 {
        let state = state.clone();
        let config = config.clone();

        Builder::new().name("statistics-collector".to_string()).spawn(move ||
            loop {
                ::std::thread::sleep(Duration::from_secs(
                    config.statistics.interval
                ));

                tasks::gather_and_print_statistics(&state, &config);
            }
        ).expect("spawn statistics thread");
    }

    if config.privileges.drop_privileges {
        // Poll (with a short sleep) until every socket worker has bound,
        // then chroot and switch user.
        loop {
            let sockets = num_bound_sockets.load(Ordering::SeqCst);

            if sockets == config.socket_workers {
                PrivDrop::default()
                    .chroot(config.privileges.chroot_path)
                    .user(config.privileges.user)
                    .apply()
                    .expect("drop privileges");

                break;
            }

            ::std::thread::sleep(Duration::from_millis(10));
        }
    }

    // The main thread doubles as the cleaning task.
    loop {
        ::std::thread::sleep(Duration::from_secs(config.cleaning.interval));

        tasks::clean_connections_and_torrents(&state);
    }
}

View file

@ -0,0 +1,246 @@
use std::sync::{Arc, atomic::{AtomicUsize, Ordering}};
use std::io::{Cursor, ErrorKind};
use std::net::SocketAddr;
use std::time::Duration;
use std::vec::Drain;
use crossbeam_channel::{Sender, Receiver};
use mio::{Events, Poll, Interest, Token};
use mio::net::UdpSocket;
use net2::{UdpSocketExt, UdpBuilder};
use net2::unix::UnixUdpBuilderExt;
use bittorrent_udp::types::IpVersion;
use bittorrent_udp::converters::{response_to_bytes, request_from_bytes};
use crate::common::*;
use crate::config::Config;
/// Socket worker loop: own one UDP socket (each worker binds the same
/// address; see `create_socket`), read and parse incoming packets,
/// forward parsed requests to the request workers, and send back any
/// queued responses.
pub fn run_socket_worker(
    state: State,
    config: Config,
    token_num: usize,
    request_sender: Sender<(Request, SocketAddr)>,
    response_receiver: Receiver<(Response, SocketAddr)>,
    num_bound_sockets: Arc<AtomicUsize>,
){
    // Single reusable buffer for both reading and encoding packets.
    let mut buffer = [0u8; MAX_PACKET_SIZE];

    let mut socket = UdpSocket::from_std(create_socket(&config));
    let mut poll = Poll::new().expect("create poll");

    let interests = Interest::READABLE;

    poll.registry()
        .register(&mut socket, Token(token_num), interests)
        .unwrap();

    // Signal the main thread that this socket is bound (gates privilege
    // dropping).
    num_bound_sockets.fetch_add(1, Ordering::SeqCst);

    let mut events = Events::with_capacity(config.network.poll_event_capacity);

    let mut requests: Vec<(Request, SocketAddr)> = Vec::new();
    // Error responses generated locally on parse failure; these never go
    // through a request worker.
    let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new();

    // Finite poll timeout so queued responses still get flushed when no
    // packets are arriving.
    let timeout = Duration::from_millis(50);

    loop {
        poll.poll(&mut events, Some(timeout))
            .expect("failed polling");

        for event in events.iter(){
            let token = event.token();

            if token.0 == token_num {
                if event.is_readable(){
                    read_requests(
                        &state,
                        &config,
                        &mut socket,
                        &mut buffer,
                        &mut requests,
                        &mut local_responses,
                    );

                    for r in requests.drain(..){
                        if let Err(err) = request_sender.send(r){
                            eprintln!("error sending to request_sender: {}", err);
                        }
                    }

                    state.statistics.readable_events.fetch_add(1, Ordering::SeqCst);

                    // Re-arm readable interest after handling the event.
                    poll.registry()
                        .reregister(&mut socket, token, interests)
                        .unwrap();
                }
            }
        }

        // Flush responses on every wakeup, readable or not.
        send_responses(
            &state,
            &config,
            &mut socket,
            &mut buffer,
            &response_receiver,
            local_responses.drain(..)
        );
    }
}
/// Create, configure and bind the non-blocking UDP socket for one socket
/// worker. `reuse_port` is enabled so every worker can bind the same
/// address.
///
/// # Panics
///
/// Panics if socket creation, configuration or binding fails. A failure
/// to set the recv buffer size is only logged.
fn create_socket(config: &Config) -> ::std::net::UdpSocket {
    let mut builder = &{
        if config.network.address.is_ipv4(){
            UdpBuilder::new_v4().expect("socket: build")
        } else {
            UdpBuilder::new_v6().expect("socket: build")
        }
    };

    builder = builder.reuse_port(true)
        .expect("socket: set reuse port");

    // unwrap_or_else instead of expect(&format!(..)): the message is now
    // only built on failure instead of being allocated on every call
    // (clippy: expect_fun_call), and the bind error itself is included.
    let socket = builder.bind(&config.network.address)
        .unwrap_or_else(|err| panic!(
            "socket: bind to {}: {:?}",
            &config.network.address,
            err
        ));

    socket.set_nonblocking(true)
        .expect("socket: set nonblocking");

    let recv_buffer_size = config.network.socket_recv_buffer_size;

    // Zero means "leave the OS default alone".
    if recv_buffer_size != 0 {
        if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size){
            eprintln!(
                "socket: failed setting recv buffer to {}: {:?}",
                recv_buffer_size,
                err
            );
        }
    }

    socket
}
/// Read datagrams from the socket until it would block, parsing each
/// into a request.
///
/// Parsed requests are pushed to `requests`; parse failures that still
/// yielded a transaction id produce error responses in `local_responses`
/// so the client can match the error to its request.
#[inline]
fn read_requests(
    state: &State,
    config: &Config,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    requests: &mut Vec<(Request, SocketAddr)>,
    local_responses: &mut Vec<(Response, SocketAddr)>,
){
    let mut requests_received: usize = 0;
    let mut bytes_received: usize = 0;

    loop {
        match socket.recv_from(&mut buffer[..]) {
            Ok((amt, src)) => {
                let request = request_from_bytes(
                    &buffer[..amt],
                    config.network.max_scrape_torrents
                );

                bytes_received += amt;

                if request.is_ok(){
                    requests_received += 1;
                }

                match request {
                    Ok(request) => {
                        requests.push((request, src));
                    },
                    Err(err) => {
                        eprintln!("request_from_bytes error: {:?}", err);

                        // Without a transaction id there is no way for
                        // the client to correlate an error response.
                        if let Some(transaction_id) = err.transaction_id {
                            // Prefer a generic "Parse error" for hard
                            // errors; fall back to the parser's own
                            // message when one was provided.
                            let opt_message = if err.error.is_some() {
                                Some("Parse error".to_string())
                            } else if let Some(message) = err.message {
                                Some(message)
                            } else {
                                None
                            };

                            if let Some(message) = opt_message {
                                let response = ErrorResponse {
                                    transaction_id,
                                    message,
                                };

                                local_responses.push((response.into(), src));
                            }
                        }
                    },
                }
            },
            Err(err) => {
                // WouldBlock: the socket is drained for now.
                if err.kind() == ErrorKind::WouldBlock {
                    break;
                }

                eprintln!("recv_from error: {}", err);
            }
        }
    }

    if config.statistics.interval != 0 {
        state.statistics.requests_received
            .fetch_add(requests_received, Ordering::SeqCst);
        state.statistics.bytes_received
            .fetch_add(bytes_received, Ordering::SeqCst);
    }
}
/// Encode and send local error responses followed by responses queued by
/// the request workers, stopping when the socket would block.
///
/// When the socket blocks, responses still in the channel are picked up
/// on a later call; anything already drained from `local_responses` is
/// dropped (acceptable for a lossy UDP protocol).
#[inline]
fn send_responses(
    state: &State,
    config: &Config,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    response_receiver: &Receiver<(Response, SocketAddr)>,
    local_responses: Drain<(Response, SocketAddr)>,
){
    let mut responses_sent: usize = 0;
    let mut bytes_sent: usize = 0;

    let mut cursor = Cursor::new(buffer);

    // `Drain` is already an iterator, so the previous `.into_iter()`
    // call was a redundant identity conversion.
    let response_iterator = local_responses.chain(
        response_receiver.try_iter()
    );

    for (response, src) in response_iterator {
        // Reuse the shared buffer for encoding each response.
        cursor.set_position(0);

        response_to_bytes(&mut cursor, response, IpVersion::IPv4).unwrap();

        let amt = cursor.position() as usize;

        match socket.send_to(&cursor.get_ref()[..amt], src){
            Ok(amt) => {
                responses_sent += 1;
                bytes_sent += amt;
            },
            Err(err) => {
                if err.kind() == ErrorKind::WouldBlock {
                    break;
                }

                eprintln!("send_to error: {}", err);
            }
        }
    }

    if config.statistics.interval != 0 {
        state.statistics.responses_sent
            .fetch_add(responses_sent, Ordering::SeqCst);
        state.statistics.bytes_sent
            .fetch_add(bytes_sent, Ordering::SeqCst);
    }
}

View file

@ -0,0 +1,120 @@
use std::sync::atomic::Ordering;
use std::time::Instant;
use histogram::Histogram;
use crate::common::*;
use crate::config::Config;
/// Remove expired connections and peers, dropping torrents that end up
/// with no peers. Takes the two mutexes one at a time to limit how long
/// each is held.
pub fn clean_connections_and_torrents(state: &State){
    let now = Instant::now();

    let mut connections = state.connections.lock();

    connections.retain(|_, v| v.0 > now);
    connections.shrink_to_fit();

    // Release the connection mutex before touching torrents.
    ::std::mem::drop(connections);

    let mut torrents = state.torrents.lock();

    torrents.retain(|_, torrent| {
        // Borrow the counters separately from `torrent.peers` so the
        // inner retain closure can update them while iterating.
        let num_seeders = &mut torrent.num_seeders;
        let num_leechers = &mut torrent.num_leechers;

        torrent.peers.retain(|_, peer| {
            let keep = peer.valid_until.0 > now;

            if !keep {
                // Keep the cached counts consistent with the map.
                match peer.status {
                    PeerStatus::Seeding => {
                        *num_seeders -= 1;
                    },
                    PeerStatus::Leeching => {
                        *num_leechers -= 1;
                    },
                    _ => (),
                };
            }

            keep
        });

        !torrent.peers.is_empty()
    });

    torrents.shrink_to_fit();
}
/// Print request/response rates, bandwidth and a peers-per-torrent
/// histogram, resetting all counters.
///
/// Counters are read with `fetch_and(0)` — an atomic read-and-reset —
/// so each interval's numbers are independent.
pub fn gather_and_print_statistics(
    state: &State,
    config: &Config,
){
    let interval = config.statistics.interval;

    let requests_received: f64 = state.statistics.requests_received
        .fetch_and(0, Ordering::SeqCst) as f64;
    let responses_sent: f64 = state.statistics.responses_sent
        .fetch_and(0, Ordering::SeqCst) as f64;
    let bytes_received: f64 = state.statistics.bytes_received
        .fetch_and(0, Ordering::SeqCst) as f64;
    let bytes_sent: f64 = state.statistics.bytes_sent
        .fetch_and(0, Ordering::SeqCst) as f64;

    let requests_per_second = requests_received / interval as f64;
    let responses_per_second: f64 = responses_sent / interval as f64;
    let bytes_received_per_second: f64 = bytes_received / interval as f64;
    let bytes_sent_per_second: f64 = bytes_sent / interval as f64;

    let readable_events: f64 = state.statistics.readable_events
        .fetch_and(0, Ordering::SeqCst) as f64;

    // Guard the division; zero readable events means no packets arrived.
    let requests_per_readable_event = if readable_events == 0.0 {
        0.0
    } else {
        requests_received / readable_events
    };

    println!(
        "stats: {:.2} requests/second, {:.2} responses/second, {:.2} requests/readable event",
        requests_per_second,
        responses_per_second,
        requests_per_readable_event
    );

    println!(
        "bandwidth: {:7.2} Mbit/s in, {:7.2} Mbit/s out",
        bytes_received_per_second * 8.0 / 1_000_000.0,
        bytes_sent_per_second * 8.0 / 1_000_000.0,
    );

    let mut peers_per_torrent = Histogram::new();

    // Fix: the previous code bound `&mut state.torrents.lock()` — a
    // reference to the temporary guard — and then dropped the
    // *reference*, which does not unlock the mutex; temporary lifetime
    // extension kept the torrent map locked for the rest of this
    // function, including all the printing below. Bind the guard itself
    // and scope it so the lock is released right after the scan.
    {
        let torrents = state.torrents.lock();

        for torrent in torrents.values(){
            let num_peers = (torrent.num_seeders + torrent.num_leechers) as u64;

            if let Err(err) = peers_per_torrent.increment(num_peers){
                eprintln!("error incrementing peers_per_torrent histogram: {}", err)
            }
        }
    }

    if peers_per_torrent.entries() != 0 {
        println!(
            "peers per torrent: min: {}, p50: {}, p75: {}, p90: {}, p99: {}, p999: {}, max: {}",
            peers_per_torrent.minimum().unwrap(),
            peers_per_torrent.percentile(50.0).unwrap(),
            peers_per_torrent.percentile(75.0).unwrap(),
            peers_per_torrent.percentile(90.0).unwrap(),
            peers_per_torrent.percentile(99.0).unwrap(),
            peers_per_torrent.percentile(99.9).unwrap(),
            peers_per_torrent.maximum().unwrap(),
        );
    }

    println!();
}