udp: use idiomatic folder structure (lib.rs + main.rs in src)

This commit is contained in:
Joakim Frostegård 2021-11-27 18:17:25 +01:00
parent b9bc5a2aa4
commit aa3253fcd6
9 changed files with 0 additions and 1 deletions

View file

@ -0,0 +1,3 @@
// Request workers: announce/scrape handling and torrent/peer state.
pub mod request;
// Socket workers: UDP I/O, connection validation, response serialization.
pub mod socket;
// Statistics worker: periodically prints per-protocol counters.
pub mod statistics;

View file

@ -0,0 +1,405 @@
use std::collections::BTreeMap;
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::net::SocketAddr;
use std::sync::atomic::Ordering;
use std::time::Duration;
use std::time::Instant;
use aquatic_common::ValidUntil;
use crossbeam_channel::Receiver;
use rand::{rngs::SmallRng, SeedableRng};
use aquatic_common::extract_response_peers;
use aquatic_udp_protocol::*;
use crate::common::*;
use crate::config::Config;
/// Peer as included in an announce response, generic over IP version
/// (`Ipv4Addr` or `Ipv6Addr`).
#[derive(Clone, PartialEq, Debug)]
pub struct ProtocolResponsePeer<I> {
    pub ip_address: I,
    pub port: Port,
}

impl<I: Ip> ProtocolResponsePeer<I> {
    /// Extract the response-relevant fields from a stored peer. Passed by
    /// name to `extract_response_peers` as the conversion function.
    #[inline(always)]
    fn from_peer(peer: &Peer<I>) -> Self {
        Self {
            ip_address: peer.ip_address,
            port: peer.port,
        }
    }
}
/// Announce response generic over IP version; converted into the concrete
/// IPv4/IPv6 `ConnectedResponse` variants before being sent back to a
/// socket worker.
pub struct ProtocolAnnounceResponse<I> {
    pub transaction_id: TransactionId,
    pub announce_interval: AnnounceInterval,
    pub leechers: NumberOfPeers,
    pub seeders: NumberOfPeers,
    pub peers: Vec<ProtocolResponsePeer<I>>,
}
/// Convert a generic IPv4 announce response into the concrete
/// `ConnectedResponse::AnnounceIpv4` variant.
///
/// Implemented as `From` rather than `Into` (clippy: `from_over_into`);
/// the standard library blanket impl still gives callers `.into()`.
impl From<ProtocolAnnounceResponse<Ipv4Addr>> for ConnectedResponse {
    fn from(response: ProtocolAnnounceResponse<Ipv4Addr>) -> Self {
        ConnectedResponse::AnnounceIpv4(AnnounceResponseIpv4 {
            transaction_id: response.transaction_id,
            announce_interval: response.announce_interval,
            leechers: response.leechers,
            seeders: response.seeders,
            peers: response
                .peers
                .into_iter()
                .map(|peer| ResponsePeerIpv4 {
                    ip_address: peer.ip_address,
                    port: peer.port,
                })
                .collect(),
        })
    }
}
/// Convert a generic IPv6 announce response into the concrete
/// `ConnectedResponse::AnnounceIpv6` variant.
///
/// Implemented as `From` rather than `Into` (clippy: `from_over_into`);
/// the standard library blanket impl still gives callers `.into()`.
impl From<ProtocolAnnounceResponse<Ipv6Addr>> for ConnectedResponse {
    fn from(response: ProtocolAnnounceResponse<Ipv6Addr>) -> Self {
        ConnectedResponse::AnnounceIpv6(AnnounceResponseIpv6 {
            transaction_id: response.transaction_id,
            announce_interval: response.announce_interval,
            leechers: response.leechers,
            seeders: response.seeders,
            peers: response
                .peers
                .into_iter()
                .map(|peer| ResponsePeerIpv6 {
                    ip_address: peer.ip_address,
                    port: peer.port,
                })
                .collect(),
        })
    }
}
/// Run a request worker loop. Never returns; intended to run on its own
/// thread.
///
/// Receives connected (connection-validated) requests from socket workers,
/// updates torrent/peer state and sends responses back to the socket
/// worker each request came from. Also periodically cleans torrent maps
/// and publishes this worker's torrent/peer counts when statistics are
/// enabled.
pub fn run_request_worker(
    config: Config,
    state: State,
    request_receiver: Receiver<(SocketWorkerIndex, ConnectedRequest, SocketAddr)>,
    response_sender: ConnectedResponseSender,
    worker_index: RequestWorkerIndex,
) {
    let mut torrents = TorrentMaps::default();
    let mut small_rng = SmallRng::from_entropy();

    // Receive with a timeout so the periodic maintenance below still runs
    // while the channel is idle.
    let timeout = Duration::from_millis(config.handlers.channel_recv_timeout_ms);
    let mut peer_valid_until = ValidUntil::new(config.cleaning.max_peer_age);

    let cleaning_interval = Duration::from_secs(config.cleaning.torrent_cleaning_interval);
    let statistics_update_interval = Duration::from_secs(config.statistics.interval);

    let mut last_cleaning = Instant::now();
    let mut last_statistics_update = Instant::now();

    let mut iter_counter = 0usize;

    loop {
        if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) {
            let response = match request {
                ConnectedRequest::Announce(request) => handle_announce_request(
                    &config,
                    &mut small_rng,
                    &mut torrents,
                    request,
                    src,
                    peer_valid_until,
                ),
                ConnectedRequest::Scrape(request) => {
                    ConnectedResponse::Scrape(handle_scrape_request(&mut torrents, src, request))
                }
            };

            // NOTE(review): try_send_to presumably drops the response when
            // the channel is full rather than blocking — confirm against
            // ConnectedResponseSender.
            response_sender.try_send_to(sender_index, response, src);
        }

        // Only touch the clock every 128 iterations to keep the hot path
        // cheap.
        if iter_counter % 128 == 0 {
            let now = Instant::now();

            peer_valid_until = ValidUntil::new_with_now(now, config.cleaning.max_peer_age);

            if now > last_cleaning + cleaning_interval {
                torrents.clean(&config, &state.access_list);

                if !statistics_update_interval.is_zero() {
                    // Peer counts are recomputed right after cleaning, so
                    // they refresh at the cleaning interval rather than the
                    // statistics interval (the statistics worker notes this
                    // in its output).
                    let peers_ipv4 = torrents.ipv4.values().map(|t| t.peers.len()).sum();
                    let peers_ipv6 = torrents.ipv6.values().map(|t| t.peers.len()).sum();

                    state.statistics_ipv4.peers[worker_index.0]
                        .store(peers_ipv4, Ordering::Release);
                    state.statistics_ipv6.peers[worker_index.0]
                        .store(peers_ipv6, Ordering::Release);
                }

                last_cleaning = now;
            }

            if !statistics_update_interval.is_zero()
                && now > last_statistics_update + statistics_update_interval
            {
                state.statistics_ipv4.torrents[worker_index.0]
                    .store(torrents.ipv4.len(), Ordering::Release);
                state.statistics_ipv6.torrents[worker_index.0]
                    .store(torrents.ipv6.len(), Ordering::Release);

                last_statistics_update = now;
            }
        }

        iter_counter = iter_counter.wrapping_add(1);
    }
}
/// Handle an announce request by dispatching to the torrent map matching
/// the address family of the request source, then converting the generic
/// response into the concrete IPv4/IPv6 `ConnectedResponse` variant.
pub fn handle_announce_request(
    config: &Config,
    rng: &mut SmallRng,
    torrents: &mut TorrentMaps,
    request: AnnounceRequest,
    src: SocketAddr,
    peer_valid_until: ValidUntil,
) -> ConnectedResponse {
    match src.ip() {
        IpAddr::V4(ip) => {
            let response = handle_announce_request_inner(
                config,
                rng,
                &mut torrents.ipv4,
                request,
                ip,
                peer_valid_until,
            );

            response.into()
        }
        IpAddr::V6(ip) => {
            let response = handle_announce_request_inner(
                config,
                rng,
                &mut torrents.ipv6,
                request,
                ip,
                peer_valid_until,
            );

            response.into()
        }
    }
}
/// Insert, update or remove the announcing peer in the torrent's peer map,
/// keep the seeder/leecher counters in sync, and build an announce
/// response containing a selection of other peers.
fn handle_announce_request_inner<I: Ip>(
    config: &Config,
    rng: &mut SmallRng,
    torrents: &mut TorrentMap<I>,
    request: AnnounceRequest,
    peer_ip: I,
    peer_valid_until: ValidUntil,
) -> ProtocolAnnounceResponse<I> {
    let peer_status = PeerStatus::from_event_and_bytes_left(request.event, request.bytes_left);

    let peer = Peer {
        ip_address: peer_ip,
        port: request.port,
        status: peer_status,
        valid_until: peer_valid_until,
    };

    let torrent_data = torrents.entry(request.info_hash).or_default();

    // Count the new status first; `insert` returns any previously stored
    // peer for this peer id, whose old status is un-counted below. The
    // order of these two steps must not change.
    let opt_removed_peer = match peer_status {
        PeerStatus::Leeching => {
            torrent_data.num_leechers += 1;

            torrent_data.peers.insert(request.peer_id, peer)
        }
        PeerStatus::Seeding => {
            torrent_data.num_seeders += 1;

            torrent_data.peers.insert(request.peer_id, peer)
        }
        PeerStatus::Stopped => torrent_data.peers.remove(&request.peer_id),
    };

    // Un-count the replaced/removed peer's previous status, so a status
    // change (e.g. leecher finishing and becoming seeder) nets out.
    match opt_removed_peer.map(|peer| peer.status) {
        Some(PeerStatus::Leeching) => {
            torrent_data.num_leechers -= 1;
        }
        Some(PeerStatus::Seeding) => {
            torrent_data.num_seeders -= 1;
        }
        _ => {}
    }

    let max_num_peers_to_take = calc_max_num_peers_to_take(config, request.peers_wanted.0);

    // Randomly selects up to `max_num_peers_to_take` peers, excluding the
    // announcing peer itself (see the tests at the bottom of this file).
    let response_peers = extract_response_peers(
        rng,
        &torrent_data.peers,
        max_num_peers_to_take,
        request.peer_id,
        ProtocolResponsePeer::from_peer,
    );

    ProtocolAnnounceResponse {
        transaction_id: request.transaction_id,
        announce_interval: AnnounceInterval(config.protocol.peer_announce_interval),
        leechers: NumberOfPeers(torrent_data.num_leechers as i32),
        seeders: NumberOfPeers(torrent_data.num_seeders as i32),
        peers: response_peers,
    }
}
/// Decide how many peers to include in an announce response.
///
/// A non-positive `peers_wanted` means the client left the choice to the
/// tracker; otherwise the client's wish is honored up to the configured
/// maximum.
#[inline]
fn calc_max_num_peers_to_take(config: &Config, peers_wanted: i32) -> usize {
    let configured_max = config.protocol.max_response_peers as usize;

    if peers_wanted <= 0 {
        configured_max
    } else {
        configured_max.min(peers_wanted as usize)
    }
}
pub fn handle_scrape_request(
torrents: &mut TorrentMaps,
src: SocketAddr,
request: PendingScrapeRequest,
) -> PendingScrapeResponse {
const EMPTY_STATS: TorrentScrapeStatistics = create_torrent_scrape_statistics(0, 0);
let mut torrent_stats: BTreeMap<usize, TorrentScrapeStatistics> = BTreeMap::new();
if src.ip().is_ipv4() {
torrent_stats.extend(request.info_hashes.into_iter().map(|(i, info_hash)| {
let s = if let Some(torrent_data) = torrents.ipv4.get(&info_hash) {
create_torrent_scrape_statistics(
torrent_data.num_seeders as i32,
torrent_data.num_leechers as i32,
)
} else {
EMPTY_STATS
};
(i, s)
}));
} else {
torrent_stats.extend(request.info_hashes.into_iter().map(|(i, info_hash)| {
let s = if let Some(torrent_data) = torrents.ipv6.get(&info_hash) {
create_torrent_scrape_statistics(
torrent_data.num_seeders as i32,
torrent_data.num_leechers as i32,
)
} else {
EMPTY_STATS
};
(i, s)
}));
}
PendingScrapeResponse {
transaction_id: request.transaction_id,
torrent_stats,
}
}
/// Build a `TorrentScrapeStatistics` from seeder/leecher counts. `const`
/// so it can initialize the zeroed `EMPTY_STATS` constant in
/// `handle_scrape_request`.
#[inline(always)]
const fn create_torrent_scrape_statistics(seeders: i32, leechers: i32) -> TorrentScrapeStatistics {
    TorrentScrapeStatistics {
        seeders: NumberOfPeers(seeders),
        completed: NumberOfDownloads(0), // No implementation planned
        leechers: NumberOfPeers(leechers),
    }
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::net::Ipv4Addr;

    use quickcheck::{quickcheck, TestResult};
    use rand::thread_rng;

    use super::*;

    // Build a PeerId whose first four bytes encode `i`, making ids unique
    // per generated peer.
    fn gen_peer_id(i: u32) -> PeerId {
        let mut peer_id = PeerId([0; 20]);

        peer_id.0[0..4].copy_from_slice(&i.to_ne_bytes());

        peer_id
    }

    // Build a peer whose IP address encodes `i`, making addresses unique
    // per generated peer.
    fn gen_peer(i: u32) -> Peer<Ipv4Addr> {
        Peer {
            ip_address: Ipv4Addr::from(i.to_be_bytes()),
            port: Port(1),
            status: PeerStatus::Leeching,
            valid_until: ValidUntil::new(0),
        }
    }

    // Property test: extract_response_peers returns at most the requested
    // number of peers, returns no duplicates, and never returns the
    // requesting (sender) peer itself.
    #[test]
    fn test_extract_response_peers() {
        fn prop(data: (u16, u16)) -> TestResult {
            let gen_num_peers = data.0 as u32;
            let req_num_peers = data.1 as usize;

            let mut peer_map: PeerMap<Ipv4Addr> = Default::default();

            let mut opt_sender_key = None;
            let mut opt_sender_peer = None;

            for i in 0..gen_num_peers {
                let key = gen_peer_id(i);
                // (i << 16) + i gives a distinct address even when ids
                // collide in their low bytes.
                let peer = gen_peer((i << 16) + i);

                if i == 0 {
                    // The first generated peer doubles as the "sender",
                    // which must be excluded from the result.
                    opt_sender_key = Some(key);
                    opt_sender_peer = Some(ProtocolResponsePeer::from_peer(&peer));
                }

                peer_map.insert(key, peer);
            }

            let mut rng = thread_rng();

            let peers = extract_response_peers(
                &mut rng,
                &peer_map,
                req_num_peers,
                opt_sender_key.unwrap_or_else(|| gen_peer_id(1)),
                ProtocolResponsePeer::from_peer,
            );

            // Check that number of returned peers is correct
            let mut success = peers.len() <= req_num_peers;

            if req_num_peers >= gen_num_peers as usize {
                // All peers requested: either every peer is returned, or
                // one less (the excluded sender).
                success &= peers.len() == gen_num_peers as usize
                    || peers.len() + 1 == gen_num_peers as usize;
            }

            // Check that returned peers are unique (no overlap) and that sender
            // isn't returned
            let mut ip_addresses = HashSet::with_capacity(peers.len());

            for peer in peers {
                if peer == opt_sender_peer.clone().unwrap()
                    || ip_addresses.contains(&peer.ip_address)
                {
                    success = false;

                    break;
                }

                ip_addresses.insert(peer.ip_address);
            }

            TestResult::from_bool(success)
        }

        quickcheck(prop as fn((u16, u16)) -> TestResult);
    }
}

View file

@ -0,0 +1,586 @@
use std::collections::BTreeMap;
use std::io::{Cursor, ErrorKind};
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::{Duration, Instant};
use std::vec::Drain;
use crossbeam_channel::Receiver;
use mio::net::UdpSocket;
use mio::{Events, Interest, Poll, Token};
use rand::prelude::{Rng, SeedableRng, StdRng};
use aquatic_common::access_list::create_access_list_cache;
use aquatic_common::access_list::AccessListCache;
use aquatic_common::AHashIndexMap;
use aquatic_common::ValidUntil;
use aquatic_udp_protocol::*;
use socket2::{Domain, Protocol, Socket, Type};
use crate::common::*;
use crate::config::Config;
/// Map of connection ids handed out to peers, keyed on (id, address) so a
/// connection id is only valid for the address it was issued to.
#[derive(Default)]
pub struct ConnectionMap(AHashIndexMap<(ConnectionId, SocketAddr), ValidUntil>);

impl ConnectionMap {
    /// Register a connection id issued to `socket_addr`, valid until the
    /// given deadline.
    pub fn insert(
        &mut self,
        connection_id: ConnectionId,
        socket_addr: SocketAddr,
        valid_until: ValidUntil,
    ) {
        self.0.insert((connection_id, socket_addr), valid_until);
    }

    /// Whether this (connection id, address) pair was previously issued
    /// and has not yet been cleaned away.
    pub fn contains(&self, connection_id: ConnectionId, socket_addr: SocketAddr) -> bool {
        self.0.contains_key(&(connection_id, socket_addr))
    }

    /// Drop expired entries and release excess capacity. Entries past
    /// their deadline but not yet cleaned still pass `contains`.
    pub fn clean(&mut self) {
        let now = Instant::now();

        self.0.retain(|_, v| v.0 > now);
        self.0.shrink_to_fit();
    }
}
/// Bookkeeping for a scrape transaction that was fanned out to multiple
/// request workers.
pub struct PendingScrapeResponseMeta {
    // Number of partial responses still outstanding.
    num_pending: usize,
    // Deadline after which the entry is dropped by periodic cleaning.
    valid_until: ValidUntil,
}
/// Accumulator for scrape transactions split across request workers,
/// keyed on transaction id. Partial responses are merged until all
/// expected parts have arrived.
#[derive(Default)]
pub struct PendingScrapeResponseMap(
    AHashIndexMap<TransactionId, (PendingScrapeResponseMeta, PendingScrapeResponse)>,
);

impl PendingScrapeResponseMap {
    /// Register a scrape transaction that was split into `num_pending`
    /// partial requests.
    pub fn prepare(
        &mut self,
        transaction_id: TransactionId,
        num_pending: usize,
        valid_until: ValidUntil,
    ) {
        let meta = PendingScrapeResponseMeta {
            num_pending,
            valid_until,
        };
        let response = PendingScrapeResponse {
            transaction_id,
            torrent_stats: BTreeMap::new(),
        };

        self.0.insert(transaction_id, (meta, response));
    }

    /// Merge a partial response into the pending entry. Returns the
    /// complete `Response` once the last expected part has arrived;
    /// `None` (with a warning) for unknown transactions, e.g. ones
    /// already removed by `clean`.
    pub fn add_and_get_finished(&mut self, response: PendingScrapeResponse) -> Option<Response> {
        let finished = if let Some(r) = self.0.get_mut(&response.transaction_id) {
            r.0.num_pending -= 1;

            // `extend` takes any IntoIterator; no explicit `.into_iter()`
            // needed (the original call was redundant).
            r.1.torrent_stats.extend(response.torrent_stats);

            r.0.num_pending == 0
        } else {
            ::log::warn!("PendingScrapeResponses.add didn't find PendingScrapeResponse in map");

            false
        };

        if finished {
            let response = self.0.remove(&response.transaction_id).unwrap().1;

            // torrent_stats is a BTreeMap keyed on the original info hash
            // index, so into_values() yields statistics in request order.
            Some(Response::Scrape(ScrapeResponse {
                transaction_id: response.transaction_id,
                torrent_stats: response.torrent_stats.into_values().collect(),
            }))
        } else {
            None
        }
    }

    /// Drop expired pending transactions and release excess capacity.
    pub fn clean(&mut self) {
        let now = Instant::now();

        self.0.retain(|_, v| v.0.valid_until.0 > now);
        self.0.shrink_to_fit();
    }
}
/// Run a socket worker loop. Never returns; intended to run on its own
/// thread.
///
/// Owns one UDP socket (bound via `create_socket`), polls it for
/// readability with mio, reads and dispatches incoming requests, and sends
/// out both locally produced responses and responses arriving from request
/// workers.
pub fn run_socket_worker(
    state: State,
    config: Config,
    token_num: usize,
    request_sender: ConnectedRequestSender,
    response_receiver: Receiver<(ConnectedResponse, SocketAddr)>,
    num_bound_sockets: Arc<AtomicUsize>,
) {
    let mut rng = StdRng::from_entropy();
    // Single reusable buffer for both receiving and serializing packets.
    let mut buffer = [0u8; MAX_PACKET_SIZE];

    let mut socket = UdpSocket::from_std(create_socket(&config));
    let mut poll = Poll::new().expect("create poll");

    let interests = Interest::READABLE;

    poll.registry()
        .register(&mut socket, Token(token_num), interests)
        .unwrap();

    // Signal to the launcher that this worker's socket is bound.
    num_bound_sockets.fetch_add(1, Ordering::SeqCst);

    let mut events = Events::with_capacity(config.network.poll_event_capacity);

    let mut connections = ConnectionMap::default();
    let mut pending_scrape_responses = PendingScrapeResponseMap::default();
    let mut access_list_cache = create_access_list_cache(&state.access_list);

    // Responses produced without request-worker involvement (connect
    // responses and errors); drained and sent every iteration.
    let mut local_responses: Vec<(Response, SocketAddr)> = Vec::new();

    let poll_timeout = Duration::from_millis(config.network.poll_timeout_ms);

    let connection_cleaning_duration =
        Duration::from_secs(config.cleaning.connection_cleaning_interval);
    let pending_scrape_cleaning_duration =
        Duration::from_secs(config.cleaning.pending_scrape_cleaning_interval);

    let mut connection_valid_until = ValidUntil::new(config.cleaning.max_connection_age);
    let mut pending_scrape_valid_until = ValidUntil::new(config.cleaning.max_pending_scrape_age);

    let mut last_connection_cleaning = Instant::now();
    let mut last_pending_scrape_cleaning = Instant::now();

    let mut iter_counter = 0usize;

    loop {
        poll.poll(&mut events, Some(poll_timeout))
            .expect("failed polling");

        for event in events.iter() {
            let token = event.token();

            // Short-circuiting `&&` instead of the original bitwise `&`,
            // which always evaluated both operands.
            if token.0 == token_num && event.is_readable() {
                read_requests(
                    &config,
                    &state,
                    &mut connections,
                    &mut pending_scrape_responses,
                    &mut access_list_cache,
                    &mut rng,
                    &mut socket,
                    &mut buffer,
                    &request_sender,
                    &mut local_responses,
                    connection_valid_until,
                    pending_scrape_valid_until,
                );
            }
        }

        send_responses(
            &state,
            &config,
            &mut socket,
            &mut buffer,
            &response_receiver,
            &mut pending_scrape_responses,
            local_responses.drain(..),
        );

        // Run periodic ValidUntil updates and state cleaning. Only touch
        // the clock every 128 iterations to keep the hot path cheap.
        if iter_counter % 128 == 0 {
            let now = Instant::now();

            connection_valid_until =
                ValidUntil::new_with_now(now, config.cleaning.max_connection_age);
            pending_scrape_valid_until =
                ValidUntil::new_with_now(now, config.cleaning.max_pending_scrape_age);

            if now > last_connection_cleaning + connection_cleaning_duration {
                connections.clean();

                last_connection_cleaning = now;
            }
            if now > last_pending_scrape_cleaning + pending_scrape_cleaning_duration {
                pending_scrape_responses.clean();

                last_pending_scrape_cleaning = now;
            }
        }

        iter_counter = iter_counter.wrapping_add(1);
    }
}
/// Drain the socket: read datagrams until the socket would block, parse
/// each into a request and dispatch it via `handle_request`, then publish
/// accumulated per-protocol statistics in one batch.
#[inline]
fn read_requests(
    config: &Config,
    state: &State,
    connections: &mut ConnectionMap,
    pending_scrape_responses: &mut PendingScrapeResponseMap,
    access_list_cache: &mut AccessListCache,
    rng: &mut StdRng,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    request_sender: &ConnectedRequestSender,
    local_responses: &mut Vec<(Response, SocketAddr)>,
    connection_valid_until: ValidUntil,
    pending_scrape_valid_until: ValidUntil,
) {
    // Accumulated locally and flushed to the shared atomics once at the
    // end, instead of one atomic op per packet.
    let mut requests_received_ipv4: usize = 0;
    let mut requests_received_ipv6: usize = 0;
    let mut bytes_received_ipv4: usize = 0;
    // `: usize` added for consistency with the sibling counters above.
    let mut bytes_received_ipv6: usize = 0;

    loop {
        match socket.recv_from(&mut buffer[..]) {
            Ok((amt, src)) => {
                let res_request =
                    Request::from_bytes(&buffer[..amt], config.protocol.max_scrape_torrents);

                let src = match src {
                    src @ SocketAddr::V4(_) => src,
                    SocketAddr::V6(src) => {
                        match src.ip().octets() {
                            // Convert IPv4-mapped address (available in std but nightly-only)
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
                                SocketAddr::V4(SocketAddrV4::new(
                                    Ipv4Addr::new(a, b, c, d),
                                    src.port(),
                                ))
                            }
                            _ => src.into(),
                        }
                    }
                };

                // Update statistics for converted address, so IPv4-mapped
                // traffic counts as IPv4. Bytes are counted even for
                // unparseable packets; requests only for parseable ones.
                if src.is_ipv4() {
                    if res_request.is_ok() {
                        requests_received_ipv4 += 1;
                    }

                    bytes_received_ipv4 += amt;
                } else {
                    if res_request.is_ok() {
                        requests_received_ipv6 += 1;
                    }

                    bytes_received_ipv6 += amt;
                }

                handle_request(
                    config,
                    connections,
                    pending_scrape_responses,
                    access_list_cache,
                    rng,
                    request_sender,
                    local_responses,
                    connection_valid_until,
                    pending_scrape_valid_until,
                    res_request,
                    src,
                );
            }
            Err(err) => {
                // Non-blocking socket: WouldBlock means the queue is
                // drained; other errors are logged and reading continues.
                if err.kind() == ErrorKind::WouldBlock {
                    break;
                }

                ::log::info!("recv_from error: {}", err);
            }
        }
    }

    if config.statistics.interval != 0 {
        state
            .statistics_ipv4
            .requests_received
            .fetch_add(requests_received_ipv4, Ordering::Release);
        state
            .statistics_ipv6
            .requests_received
            .fetch_add(requests_received_ipv6, Ordering::Release);
        state
            .statistics_ipv4
            .bytes_received
            .fetch_add(bytes_received_ipv4, Ordering::Release);
        state
            .statistics_ipv6
            .bytes_received
            .fetch_add(bytes_received_ipv6, Ordering::Release);
    }
}
/// Dispatch one parsed (or unparseable) request.
///
/// Connect requests are answered locally with a fresh connection id.
/// Announce and scrape requests are only accepted when their connection id
/// was previously issued to the same source address; scrapes are split per
/// request worker (by info hash) and registered in
/// `pending_scrape_responses` for later reassembly. Parse errors produce
/// an error response when they carry a valid connection id.
pub fn handle_request(
    config: &Config,
    connections: &mut ConnectionMap,
    pending_scrape_responses: &mut PendingScrapeResponseMap,
    access_list_cache: &mut AccessListCache,
    rng: &mut StdRng,
    request_sender: &ConnectedRequestSender,
    local_responses: &mut Vec<(Response, SocketAddr)>,
    connection_valid_until: ValidUntil,
    pending_scrape_valid_until: ValidUntil,
    res_request: Result<Request, RequestParseError>,
    src: SocketAddr,
) {
    let access_list_mode = config.access_list.mode;

    match res_request {
        Ok(Request::Connect(request)) => {
            let connection_id = ConnectionId(rng.gen());

            connections.insert(connection_id, src, connection_valid_until);

            let response = Response::Connect(ConnectResponse {
                connection_id,
                transaction_id: request.transaction_id,
            });

            local_responses.push((response, src))
        }
        Ok(Request::Announce(request)) => {
            if connections.contains(request.connection_id, src) {
                // Only pass on requests for torrents allowed by the
                // access list (which torrents those are depends on the
                // configured mode).
                if access_list_cache
                    .load()
                    .allows(access_list_mode, &request.info_hash.0)
                {
                    let worker_index =
                        RequestWorkerIndex::from_info_hash(config, request.info_hash);

                    request_sender.try_send_to(
                        worker_index,
                        ConnectedRequest::Announce(request),
                        src,
                    );
                } else {
                    let response = Response::Error(ErrorResponse {
                        transaction_id: request.transaction_id,
                        message: "Info hash not allowed".into(),
                    });

                    local_responses.push((response, src))
                }
            }
        }
        Ok(Request::Scrape(request)) => {
            if connections.contains(request.connection_id, src) {
                let mut requests: AHashIndexMap<RequestWorkerIndex, PendingScrapeRequest> =
                    Default::default();

                let transaction_id = request.transaction_id;

                // Group info hashes by the request worker responsible for
                // them, remembering each hash's original index so the
                // final response preserves request order.
                for (i, info_hash) in request.info_hashes.into_iter().enumerate() {
                    let pending = requests
                        // `config` instead of the original `&config`:
                        // `config` is already a reference, matching the
                        // announce branch above.
                        .entry(RequestWorkerIndex::from_info_hash(config, info_hash))
                        .or_insert_with(|| PendingScrapeRequest {
                            transaction_id,
                            info_hashes: BTreeMap::new(),
                        });

                    pending.info_hashes.insert(i, info_hash);
                }

                // Register before sending, so responses can't arrive for
                // an unregistered transaction.
                pending_scrape_responses.prepare(
                    transaction_id,
                    requests.len(),
                    pending_scrape_valid_until,
                );

                for (request_worker_index, request) in requests {
                    request_sender.try_send_to(
                        request_worker_index,
                        ConnectedRequest::Scrape(request),
                        src,
                    );
                }
            }
        }
        Err(err) => {
            ::log::debug!("Request::from_bytes error: {:?}", err);

            if let RequestParseError::Sendable {
                connection_id,
                transaction_id,
                err,
            } = err
            {
                if connections.contains(connection_id, src) {
                    let response = ErrorResponse {
                        transaction_id,
                        message: err.right_or("Parse error").into(),
                    };

                    local_responses.push((response.into(), src));
                }
            }
        }
    }
}
/// Send out all pending responses: first the locally produced ones, then
/// those received from request workers (merging partial scrape responses
/// until complete). Publishes accumulated per-protocol statistics in one
/// batch at the end.
#[inline]
fn send_responses(
    state: &State,
    config: &Config,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    response_receiver: &Receiver<(ConnectedResponse, SocketAddr)>,
    pending_scrape_responses: &mut PendingScrapeResponseMap,
    local_responses: Drain<(Response, SocketAddr)>,
) {
    // Accumulated locally and flushed to the shared atomics once at the
    // end, instead of one atomic op per packet.
    let mut responses_sent_ipv4: usize = 0;
    let mut responses_sent_ipv6: usize = 0;
    let mut bytes_sent_ipv4: usize = 0;
    let mut bytes_sent_ipv6: usize = 0;

    for (response, addr) in local_responses {
        send_response(
            config,
            socket,
            buffer,
            &mut responses_sent_ipv4,
            &mut responses_sent_ipv6,
            &mut bytes_sent_ipv4,
            &mut bytes_sent_ipv6,
            response,
            addr,
        );
    }

    for (response, addr) in response_receiver.try_iter() {
        let opt_response = match response {
            // Partial scrape responses are merged; None until the last
            // part for the transaction has arrived.
            ConnectedResponse::Scrape(r) => pending_scrape_responses.add_and_get_finished(r),
            ConnectedResponse::AnnounceIpv4(r) => Some(Response::AnnounceIpv4(r)),
            ConnectedResponse::AnnounceIpv6(r) => Some(Response::AnnounceIpv6(r)),
        };

        if let Some(response) = opt_response {
            send_response(
                config,
                socket,
                buffer,
                &mut responses_sent_ipv4,
                &mut responses_sent_ipv6,
                &mut bytes_sent_ipv4,
                &mut bytes_sent_ipv6,
                response,
                addr,
            );
        }
    }

    if config.statistics.interval != 0 {
        state
            .statistics_ipv4
            .responses_sent
            .fetch_add(responses_sent_ipv4, Ordering::Release);
        state
            .statistics_ipv6
            .responses_sent
            .fetch_add(responses_sent_ipv6, Ordering::Release);
        state
            .statistics_ipv4
            .bytes_sent
            .fetch_add(bytes_sent_ipv4, Ordering::Release);
        state
            .statistics_ipv6
            .bytes_sent
            .fetch_add(bytes_sent_ipv6, Ordering::Release);
    }
}
/// Serialize one response into `buffer` and send it to `addr`,
/// incrementing the counters for the relevant address family on success.
fn send_response(
    config: &Config,
    socket: &mut UdpSocket,
    buffer: &mut [u8],
    responses_sent_ipv4: &mut usize,
    responses_sent_ipv6: &mut usize,
    bytes_sent_ipv4: &mut usize,
    bytes_sent_ipv6: &mut usize,
    response: Response,
    addr: SocketAddr,
) {
    let mut cursor = Cursor::new(buffer);

    // Statistics are keyed on the family before the rewrite below, so
    // IPv4 peers behind a dual-stack socket still count as IPv4.
    let addr_is_ipv4 = addr.is_ipv4();

    // Rewrite the destination to match the bound socket's family: an
    // IPv4-bound socket needs a V4 address; an IPv6-bound socket needs V6,
    // so V4 peer addresses become IPv4-mapped IPv6 addresses.
    let addr = if config.network.address.is_ipv4() {
        if let SocketAddr::V4(addr) = addr {
            SocketAddr::V4(addr)
        } else {
            // NOTE(review): relies on read_requests never producing a V6
            // peer address when the socket is IPv4-bound — confirm.
            unreachable!()
        }
    } else {
        match addr {
            SocketAddr::V4(addr) => {
                let ip = addr.ip().to_ipv6_mapped();

                SocketAddr::V6(SocketAddrV6::new(ip, addr.port(), 0, 0))
            }
            addr => addr,
        }
    };

    match response.write(&mut cursor) {
        Ok(()) => {
            let amt = cursor.position() as usize;

            match socket.send_to(&cursor.get_ref()[..amt], addr) {
                Ok(amt) => {
                    if addr_is_ipv4 {
                        *responses_sent_ipv4 += 1;
                        *bytes_sent_ipv4 += amt;
                    } else {
                        *responses_sent_ipv6 += 1;
                        *bytes_sent_ipv6 += amt;
                    }
                }
                Err(err) => {
                    // Send failures (e.g. full send buffer) are logged and
                    // the response is dropped.
                    ::log::info!("send_to error: {}", err);
                }
            }
        }
        Err(err) => {
            ::log::error!("Response::write error: {:?}", err);
        }
    }
}
/// Create, configure and bind the worker's UDP socket according to config.
///
/// Panics if socket creation, option setting or binding fails, since the
/// worker cannot operate without its socket.
pub fn create_socket(config: &Config) -> ::std::net::UdpSocket {
    let socket = if config.network.address.is_ipv4() {
        Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))
    } else {
        Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))
    }
    .expect("create socket");

    if config.network.only_ipv6 {
        // Without this, an IPv6 socket would also accept IPv4-mapped
        // traffic.
        socket.set_only_v6(true).expect("socket: set only ipv6");
    }

    // SO_REUSEPORT lets every socket worker bind the same address, with
    // the kernel distributing packets among them.
    socket.set_reuse_port(true).expect("socket: set reuse port");

    // Non-blocking is required for the mio poll loop; recv_from returns
    // WouldBlock when the queue is drained.
    socket
        .set_nonblocking(true)
        .expect("socket: set nonblocking");

    socket
        .bind(&config.network.address.into())
        .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", config.network.address, err));

    // 0 means "leave the OS default"; a set failure is logged but not
    // fatal.
    let recv_buffer_size = config.network.socket_recv_buffer_size;

    if recv_buffer_size != 0 {
        if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) {
            ::log::error!(
                "socket: failed setting recv buffer to {}: {:?}",
                recv_buffer_size,
                err
            );
        }
    }

    socket.into()
}

View file

@ -0,0 +1,73 @@
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::{Duration, Instant};
use crate::common::*;
use crate::config::Config;
/// Run the statistics worker loop. Never returns; sleeps for the
/// configured interval between reports.
///
/// NOTE(review): a zero statistics interval would make this spin without
/// sleeping — presumably this worker is only spawned when the interval is
/// non-zero; confirm at the call site.
pub fn run_statistics_worker(config: Config, state: State) {
    // IPv4 is reported when the socket is IPv4-bound, or IPv6-bound
    // without only_ipv6 (dual-stack handles IPv4-mapped traffic too).
    let ipv4_active = config.network.address.is_ipv4() || !config.network.only_ipv6;
    let ipv6_active = config.network.address.is_ipv6();

    let mut last_ipv4 = Instant::now();
    let mut last_ipv6 = Instant::now();

    loop {
        ::std::thread::sleep(Duration::from_secs(config.statistics.interval));

        println!("General:");
        println!("  access list entries: {}", state.access_list.load().len());

        if ipv4_active {
            println!("IPv4:");

            gather_and_print_for_protocol(&config, &state.statistics_ipv4, &mut last_ipv4);
        }
        if ipv6_active {
            println!("IPv6:");

            gather_and_print_for_protocol(&config, &state.statistics_ipv6, &mut last_ipv6);
        }

        println!();
    }
}
/// Read-and-reset the per-interval counters for one protocol, compute
/// rates over the elapsed time since `last`, and print the report.
fn gather_and_print_for_protocol(config: &Config, statistics: &Statistics, last: &mut Instant) {
    // fetch_and with 0 atomically reads the counter and resets it to zero,
    // so each interval's counts are independent.
    let requests_received: f64 = statistics.requests_received.fetch_and(0, Ordering::AcqRel) as f64;
    let responses_sent: f64 = statistics.responses_sent.fetch_and(0, Ordering::AcqRel) as f64;
    let bytes_received: f64 = statistics.bytes_received.fetch_and(0, Ordering::AcqRel) as f64;
    let bytes_sent: f64 = statistics.bytes_sent.fetch_and(0, Ordering::AcqRel) as f64;

    // Use real elapsed time rather than the nominal interval, since sleep
    // and printing take time too.
    let now = Instant::now();
    let elapsed = (now - *last).as_secs_f64();

    *last = now;

    let requests_per_second = requests_received / elapsed;
    let responses_per_second: f64 = responses_sent / elapsed;
    let bytes_received_per_second: f64 = bytes_received / elapsed;
    let bytes_sent_per_second: f64 = bytes_sent / elapsed;

    // Torrent and peer gauges are per-request-worker; sum across workers.
    let num_torrents: usize = sum_atomic_usizes(&statistics.torrents);
    let num_peers = sum_atomic_usizes(&statistics.peers);

    println!(
        "  requests/second: {:10.2}, responses/second: {:10.2}",
        requests_per_second, responses_per_second
    );
    println!(
        "  bandwidth: {:7.2} Mbit/s in, {:7.2} Mbit/s out",
        bytes_received_per_second * 8.0 / 1_000_000.0,
        bytes_sent_per_second * 8.0 / 1_000_000.0,
    );
    println!("  number of torrents: {}", num_torrents);
    println!(
        "  number of peers: {} (updated every {} seconds)",
        num_peers, config.cleaning.torrent_cleaning_interval
    );
}
/// Sum the current values of a slice of atomic counters.
fn sum_atomic_usizes(values: &[AtomicUsize]) -> usize {
    let mut total = 0;

    for value in values {
        total += value.load(Ordering::Acquire);
    }

    total
}