mirror of
https://github.com/YGGverse/aquatic.git
synced 2026-03-31 17:55:36 +00:00
aquatic: count received requests / sent responses, print on interval
This commit is contained in:
parent
ea2c37789d
commit
19277831c8
5 changed files with 52 additions and 2 deletions
3
TODO.md
3
TODO.md
|
|
@ -15,8 +15,7 @@
|
|||
* Performance
|
||||
* target-cpu=native good?
|
||||
* mimalloc good?
|
||||
* Track received requests and sent responses, as well as bytes transferred,
|
||||
in atomic u64 / usize (repurposed to hold f64 for bytes)?
|
||||
* Track bytes transferred?
|
||||
|
||||
## Don't do
|
||||
|
||||
|
|
|
|||
|
|
@ -117,10 +117,20 @@ impl Default for TorrentData {
|
|||
/// Per-torrent state keyed by info hash; `DashMap` provides sharded,
/// concurrent access so worker threads can update entries without a
/// global lock. NOTE(review): `TorrentData` is defined above this view.
pub type TorrentMap = DashMap<InfoHash, TorrentData>;
|
||||
|
||||
|
||||
/// Interval counters shared between socket workers and the
/// statistics-printing thread. Workers accumulate locally and publish
/// with `fetch_add`; the printer thread reads-and-resets each counter
/// atomically with `fetch_and(0, ..)` once per reporting interval.
#[derive(Default)]
pub struct Statistics {
    // Requests successfully parsed since the last interval reset.
    pub requests_received: AtomicUsize,
    // Responses successfully sent (send_to returned Ok) since the last reset.
    pub responses_sent: AtomicUsize,
}
|
||||
|
||||
|
||||
|
||||
|
||||
/// Shared application state. `Clone` is cheap: every field is an `Arc`,
/// so cloning only bumps reference counts — each thread gets its own
/// handle to the same underlying maps and counters.
#[derive(Clone)]
pub struct State {
    // Connection tracking map; ConnectionMap is declared elsewhere in this
    // module — presumably keyed per client, verify at its definition.
    pub connections: Arc<ConnectionMap>,
    // Torrent state (DashMap<InfoHash, TorrentData>), shared across workers.
    pub torrents: Arc<TorrentMap>,
    // Interval request/response counters, read-and-reset by the stats thread.
    pub statistics: Arc<Statistics>,
}
|
||||
|
||||
impl State {
|
||||
|
|
@ -128,6 +138,7 @@ impl State {
|
|||
Self {
|
||||
connections: Arc::new(DashMap::new()),
|
||||
torrents: Arc::new(DashMap::new()),
|
||||
statistics: Arc::new(Statistics::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ pub struct Config {
|
|||
pub recv_buffer_size: usize,
|
||||
pub max_scrape_torrents: u8,
|
||||
pub max_response_peers: usize,
|
||||
pub statistics_interval: u64,
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -17,6 +18,7 @@ impl Default for Config {
|
|||
recv_buffer_size: 4096 * 16,
|
||||
max_scrape_torrents: 255,
|
||||
max_response_peers: 255,
|
||||
statistics_interval: 5,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,3 +1,4 @@
|
|||
use std::sync::atomic::Ordering;
|
||||
use std::time::Duration;
|
||||
|
||||
pub mod common;
|
||||
|
|
@ -25,6 +26,29 @@ pub fn run(){
|
|||
});
|
||||
}
|
||||
|
||||
{
|
||||
let state = state.clone();
|
||||
|
||||
::std::thread::spawn(move || {
|
||||
let interval = config.statistics_interval;
|
||||
|
||||
loop {
|
||||
::std::thread::sleep(Duration::from_secs(interval));
|
||||
|
||||
let requests_per_second: f64 = state.statistics.requests_received
|
||||
.fetch_and(0, Ordering::SeqCst) as f64 / interval as f64;
|
||||
let responses_per_second: f64 = state.statistics.responses_sent
|
||||
.fetch_and(0, Ordering::SeqCst) as f64 / interval as f64;
|
||||
|
||||
println!(
|
||||
"stats: {} requests/second, {} responses/second",
|
||||
requests_per_second,
|
||||
responses_per_second
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
loop {
|
||||
::std::thread::sleep(Duration::from_secs(30));
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
use std::sync::atomic::Ordering;
|
||||
use std::net::SocketAddr;
|
||||
use std::io::ErrorKind;
|
||||
|
||||
|
|
@ -109,6 +110,9 @@ fn handle_readable_socket(
|
|||
announce_requests: &mut Vec<(AnnounceRequest, SocketAddr)>,
|
||||
scrape_requests: &mut Vec<(ScrapeRequest, SocketAddr)>,
|
||||
){
|
||||
let mut requests_received: usize = 0;
|
||||
let mut responses_sent: usize = 0;
|
||||
|
||||
loop {
|
||||
match socket.recv_from(buffer) {
|
||||
Ok((amt, src)) => {
|
||||
|
|
@ -117,6 +121,10 @@ fn handle_readable_socket(
|
|||
config.max_scrape_torrents
|
||||
);
|
||||
|
||||
if request.is_ok(){
|
||||
requests_received += 1;
|
||||
}
|
||||
|
||||
match request {
|
||||
Ok(Request::Connect(r)) => {
|
||||
connect_requests.push((r, src));
|
||||
|
|
@ -175,6 +183,7 @@ fn handle_readable_socket(
|
|||
|
||||
match socket.send_to(&bytes[..], src){
|
||||
Ok(_bytes_sent) => {
|
||||
responses_sent += 1;
|
||||
},
|
||||
Err(err) => {
|
||||
match err.kind(){
|
||||
|
|
@ -188,4 +197,9 @@ fn handle_readable_socket(
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
state.statistics.requests_received
|
||||
.fetch_add(requests_received, Ordering::SeqCst);
|
||||
state.statistics.responses_sent
|
||||
.fetch_add(responses_sent, Ordering::SeqCst);
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue