bench_handlers: run several test rounds, print averages

This commit is contained in:
Joakim Frostegård 2020-04-06 03:43:06 +02:00
parent 9d66a5e7d0
commit 8c57b3c4fe
7 changed files with 127 additions and 26 deletions

26
Cargo.lock generated
View file

@ -28,6 +28,7 @@ dependencies = [
"mimalloc",
"mio",
"net2",
"num-format",
"plotly",
"quickcheck",
"quickcheck_macros",
@ -35,6 +36,15 @@ dependencies = [
"rand_distr",
]
[[package]]
name = "arrayvec"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9"
dependencies = [
"nodrop",
]
[[package]]
name = "askama"
version = "0.9.0"
@ -280,6 +290,12 @@ dependencies = [
"winapi",
]
[[package]]
name = "nodrop"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
[[package]]
name = "nom"
version = "5.1.1"
@ -334,6 +350,16 @@ dependencies = [
"num-traits",
]
[[package]]
name = "num-format"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465"
dependencies = [
"arrayvec",
"itoa",
]
[[package]]
name = "num-integer"
version = "0.1.42"

View file

@ -5,6 +5,11 @@
* extract_response_peers
* Cleaner code
* Stack-allocated vector?
* Benchmarks
* Separate setup so actual benchmarks can be run after each other,
enabling better profiling
* Show standard deviation?
* Send in connect response ids to other functions as integration test
## Don't do

View file

@ -23,6 +23,9 @@ dashmap = "3"
indexmap = "1"
mimalloc = "0.1"
net2 = "0.2"
# bench_handlers / plot_pareto
num-format = "0.4"
rand_distr = "0.2"
plotly = "0.4"

View file

@ -18,8 +18,8 @@ pub fn bench(
rng: &mut impl Rng,
state: &State,
info_hashes: &Vec<InfoHash>
){
println!("# benchmark: handle_announce_requests\n");
) -> (f64, f64) {
println!("## benchmark: handle_announce_requests\n");
println!("generating data..");
@ -40,7 +40,7 @@ pub fn bench(
let announce_requests = announce_requests.drain(..);
::std::thread::sleep(Duration::from_secs(1));
::std::thread::sleep(Duration::from_millis(100));
let now = Instant::now();
@ -54,8 +54,11 @@ pub fn bench(
let duration = Instant::now() - now;
println!("\nrequests/second: {:.2}", ANNOUNCE_REQUESTS as f64 / (duration.as_millis() as f64 / 1000.0));
println!("time per request: {:.2}ns", duration.as_nanos() as f64 / ANNOUNCE_REQUESTS as f64);
let requests_per_second = ANNOUNCE_REQUESTS as f64 / (duration.as_millis() as f64 / 1000.0);
let time_per_request = duration.as_nanos() as f64 / ANNOUNCE_REQUESTS as f64;
println!("\nrequests/second: {:.2}", requests_per_second);
println!("time per request: {:.2}ns", time_per_request);
let mut total_num_peers = 0.0f64;
let mut max_num_peers = 0.0f64;
@ -77,6 +80,8 @@ pub fn bench(
println!("avg num peers returned: {:.2}", total_num_peers / ANNOUNCE_REQUESTS as f64);
println!("max num peers returned: {:.2}", max_num_peers);
(requests_per_second, time_per_request)
}

View file

@ -1,4 +1,4 @@
use std::time::Instant;
use std::time::{Instant, Duration};
use std::net::SocketAddr;
use rand::{Rng, thread_rng, rngs::SmallRng, SeedableRng};
@ -10,8 +10,8 @@ use aquatic::handlers::handle_connect_requests;
const ITERATIONS: usize = 10_000_000;
pub fn bench(){
println!("# benchmark: handle_connect_requests\n");
pub fn bench() -> (f64, f64){
println!("## benchmark: handle_connect_requests\n");
let state = State::new();
let mut responses = Vec::new();
@ -21,14 +21,19 @@ pub fn bench(){
println!("running benchmark..");
::std::thread::sleep(Duration::from_millis(100));
let now = Instant::now();
handle_connect_requests(&state, &mut responses, requests);
let duration = Instant::now() - now;
println!("\nrequests/second: {:.2}", ITERATIONS as f64 / (duration.as_millis() as f64 / 1000.0));
println!("time per request: {:.2}ns", duration.as_nanos() as f64 / ITERATIONS as f64);
let requests_per_second = ITERATIONS as f64 / (duration.as_millis() as f64 / 1000.0);
let time_per_request = duration.as_nanos() as f64 / ITERATIONS as f64;
println!("\nrequests/second: {:.2}", requests_per_second);
println!("time per request: {:.2}ns", time_per_request);
let mut dummy = 0usize;
let mut num_responses: usize = 0;
@ -50,6 +55,8 @@ pub fn bench(){
if dummy == ITERATIONS {
println!("dummy test output: {}", dummy);
}
(requests_per_second, time_per_request)
}

View file

@ -1,7 +1,17 @@
//! Benchmark announce and scrape handlers
//!
//! Example summary output:
//! ```
//! # Average results over 20 rounds
//!
//! connect handler: 3 365 415 requests/second, 297.41 ns/request
//! announce handler: 346 650 requests/second, 2921.76 ns/request
//! scrape handler: 1 313 100 requests/second, 762.47 ns/request
//! ```
use std::time::Duration;
use num_format::{Locale, ToFormattedString};
use rand::{Rng, thread_rng, rngs::SmallRng, SeedableRng};
use aquatic::common::*;
@ -17,26 +27,66 @@ mod scrape;
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
macro_rules! print_results {
($request_type:expr, $num_rounds:expr, $data:expr) => {
let per_second = (
($data.0 / ($num_rounds as f64)
) as usize).to_formatted_string(&Locale::se);
println!(
"{} {:>10} requests/second, {:>8.2} ns/request",
$request_type,
per_second,
$data.1 / ($num_rounds as f64)
);
};
}
fn main(){
connect::bench();
let num_rounds = 20;
println!("");
let mut connect_data = (0.0, 0.0);
let mut announce_data = (0.0, 0.0);
let mut scrape_data = (0.0, 0.0);
::std::thread::sleep(Duration::from_secs(1));
for round in 0..num_rounds {
println!("# Round {}/{}\n", round + 1, num_rounds);
let mut rng = SmallRng::from_rng(thread_rng()).unwrap();
let info_hashes = create_info_hashes(&mut rng);
let state = State::new();
let d = connect::bench();
connect_data.0 += d.0;
connect_data.1 += d.1;
announce::bench(&mut rng, &state, &info_hashes);
println!("");
state.connections.clear();
::std::thread::sleep(Duration::from_millis(100));
println!("");
let mut rng = SmallRng::from_rng(thread_rng()).unwrap();
let info_hashes = create_info_hashes(&mut rng);
let state = State::new();
::std::thread::sleep(Duration::from_secs(1));
let d = announce::bench(&mut rng, &state, &info_hashes);
announce_data.0 += d.0;
announce_data.1 += d.1;
scrape::bench(&mut rng, &state, &info_hashes);
state.connections.clear();
println!("");
::std::thread::sleep(Duration::from_millis(100));
let d = scrape::bench(&mut rng, &state, &info_hashes);
scrape_data.0 += d.0;
scrape_data.1 += d.1;
println!();
}
println!("# Average results over {} rounds\n", num_rounds);
print_results!("connect handler: ", num_rounds, connect_data);
print_results!("announce handler:", num_rounds, announce_data);
print_results!("scrape handler: ", num_rounds, scrape_data);
}

View file

@ -19,8 +19,8 @@ pub fn bench(
rng: &mut impl Rng,
state: &State,
info_hashes: &Vec<InfoHash>
){
println!("# benchmark: handle_scrape_requests\n");
) -> (f64, f64) {
println!("## benchmark: handle_scrape_requests\n");
println!("generating data..");
let mut responses = Vec::with_capacity(SCRAPE_REQUESTS);
@ -40,7 +40,7 @@ pub fn bench(
let scrape_requests = scrape_requests.drain(..);
::std::thread::sleep(Duration::from_secs(1));
::std::thread::sleep(Duration::from_millis(100));
let now = Instant::now();
@ -54,8 +54,11 @@ pub fn bench(
let duration = Instant::now() - now;
println!("\nrequests/second: {:.2}", SCRAPE_REQUESTS as f64 / (duration.as_millis() as f64 / 1000.0));
println!("time per request: {:.2}ns", duration.as_nanos() as f64 / SCRAPE_REQUESTS as f64);
let requests_per_second = SCRAPE_REQUESTS as f64 / (duration.as_millis() as f64 / 1000.0);
let time_per_request = duration.as_nanos() as f64 / SCRAPE_REQUESTS as f64;
println!("\nrequests/second: {:.2}", requests_per_second);
println!("time per request: {:.2}ns", time_per_request);
let mut total_num_peers = 0.0f64;
let mut num_responses: usize = 0;
@ -76,6 +79,8 @@ pub fn bench(
}
println!("avg num peers reported: {:.2}", total_num_peers / (SCRAPE_REQUESTS as f64 * SCRAPE_NUM_HASHES as f64));
(requests_per_second, time_per_request)
}