Rename the aquatic crate to aquatic_udp; do the same for the bench and load test crates.

This commit is contained in:
Joakim Frostegård 2020-05-11 16:55:46 +02:00
parent f614bab03d
commit 1b8d74e26d
35 changed files with 53 additions and 53 deletions

View file

@ -0,0 +1,131 @@
use std::net::SocketAddr;
use std::time::{Duration, Instant};
use crossbeam_channel::{Sender, Receiver};
use indicatif::ProgressIterator;
use rand::Rng;
use rand_distr::Pareto;
use aquatic_udp::common::*;
use aquatic_udp::config::Config;
use aquatic_udp_bench::pareto_usize;
use crate::common::*;
use crate::config::BenchConfig;
/// Benchmark the announce request handler.
///
/// Each round re-sends the same pre-generated announce requests in
/// pipeline-sized chunks, opportunistically draining responses without
/// blocking, then blocks until every response for the rounds so far has
/// arrived.
///
/// Returns the total number of announce responses received and the
/// elapsed wall-clock time.
pub fn bench_announce_handler(
    state: &State,
    bench_config: &BenchConfig,
    aquatic_config: &Config,
    request_sender: &Sender<(Request, SocketAddr)>,
    response_receiver: &Receiver<(Response, SocketAddr)>,
    rng: &mut impl Rng,
    info_hashes: &Vec<InfoHash>,
) -> (usize, Duration) {
    let requests = create_requests(
        state,
        rng,
        info_hashes,
        bench_config.num_announce_requests
    );

    // Chunk size: roughly what the worker threads can handle per iteration.
    let p = aquatic_config.handlers.max_requests_per_iter * bench_config.num_threads;

    let mut num_responses = 0usize;

    // Accumulator over response contents so response processing can't be
    // optimized away.
    let mut dummy: u16 = rng.gen();

    let pb = create_progress_bar("Announce", bench_config.num_rounds as u64);

    // Start benchmark
    let before = Instant::now();

    for round in (0..bench_config.num_rounds).progress_with(pb) {
        for request_chunk in requests.chunks(p) {
            for (request, src) in request_chunk {
                request_sender.send((request.clone().into(), *src)).unwrap();
            }

            // Non-blocking drain of responses already produced.
            while let Ok((Response::Announce(r), _)) = response_receiver.try_recv() {
                num_responses += 1;

                if let Some(last_peer) = r.peers.last() {
                    dummy ^= last_peer.port.0;
                }
            }
        }

        // Block until all responses for the rounds run so far have arrived.
        let total = bench_config.num_announce_requests * (round + 1);

        while num_responses < total {
            match response_receiver.recv() {
                Ok((Response::Announce(r), _)) => {
                    num_responses += 1;

                    if let Some(last_peer) = r.peers.last() {
                        dummy ^= last_peer.port.0;
                    }
                },
                Ok(_) => {
                    // Ignore responses of other types (e.g. errors).
                },
                Err(err) => {
                    // Previously this case was silently swallowed, which
                    // would busy-loop forever if all response senders were
                    // dropped. Fail loudly instead.
                    panic!("response channel disconnected: {}", err);
                }
            }
        }
    }

    let elapsed = before.elapsed();

    // Read `dummy` so the XOR accumulation has an observable effect.
    if dummy == 0 {
        println!("dummy dummy");
    }

    (num_responses, elapsed)
}
/// Generate `number` announce requests with Pareto-distributed info hash
/// popularity, each paired with the source address of an existing
/// connection taken from `state`.
///
/// # Panics
///
/// Panics (with a clear message) if `state` holds fewer connections than
/// `number`, or if `info_hashes` is empty.
pub fn create_requests(
    state: &State,
    rng: &mut impl Rng,
    info_hashes: &[InfoHash],
    number: usize,
) -> Vec<(AnnounceRequest, SocketAddr)> {
    let pareto = Pareto::new(1., PARETO_SHAPE).unwrap();

    // Explicit guard: an empty slice would underflow below.
    assert!(!info_hashes.is_empty(), "info_hashes must not be empty");
    let max_index = info_hashes.len() - 1;

    let connections = state.connections.lock();
    let connection_keys: Vec<ConnectionKey> = connections.keys()
        .take(number)
        .cloned()
        .collect();

    // Previously this was a bare index panic deep in the loop; fail up
    // front with an actionable message instead.
    assert!(
        connection_keys.len() >= number,
        "fewer connections ({}) than requested announce requests ({}); \
         run the connect benchmark with at least as many requests first",
        connection_keys.len(),
        number
    );

    let mut requests = Vec::with_capacity(number);

    for key in connection_keys.iter().take(number) {
        // Skew popularity: low indices are picked far more often.
        let info_hash_index = pareto_usize(rng, pareto, max_index);

        let request = AnnounceRequest {
            connection_id: key.connection_id,
            transaction_id: TransactionId(rng.gen()),
            info_hash: info_hashes[info_hash_index],
            peer_id: PeerId(rng.gen()),
            bytes_downloaded: NumberOfBytes(rng.gen()),
            bytes_uploaded: NumberOfBytes(rng.gen()),
            bytes_left: NumberOfBytes(rng.gen()),
            event: AnnounceEvent::Started,
            ip_address: None,
            key: PeerKey(rng.gen()),
            peers_wanted: NumberOfPeers(rng.gen()),
            port: Port(rng.gen())
        };

        requests.push((request, key.socket_addr));
    }

    requests
}

View file

@ -0,0 +1,14 @@
use indicatif::{ProgressBar, ProgressStyle};
// Shape parameter for the Pareto distribution used when picking info
// hash indices in the announce/scrape request generators.
pub const PARETO_SHAPE: f64 = 0.1;
// Number of distinct info hashes generated for the benchmarks.
pub const NUM_INFO_HASHES: usize = 10_000;
/// Build a progress bar labelled `name` that spans `iterations` steps.
pub fn create_progress_bar(name: &str, iterations: u64) -> ProgressBar {
    // Left-align the label in an 8-char column, then bar and position.
    let template = format!("{:<8} {}", name, "{wide_bar} {pos:>2}/{len:>2}");
    let bar = ProgressBar::new(iterations);
    bar.with_style(ProgressStyle::default_bar().template(&template))
}

View file

@ -0,0 +1,26 @@
use serde::{Serialize, Deserialize};
/// Benchmark run configuration, loaded via `run_app_with_cli_and_config`
/// in the binary entry point.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BenchConfig {
/// Number of rounds run per benchmark (connect/announce/scrape)
pub num_rounds: usize,
/// Number of request handler worker threads to spawn
pub num_threads: usize,
/// Connect requests sent each round of the connect benchmark
pub num_connect_requests: usize,
/// Announce requests sent each round of the announce benchmark
pub num_announce_requests: usize,
/// Scrape requests sent each round of the scrape benchmark
pub num_scrape_requests: usize,
/// Info hashes included in each scrape request
pub num_hashes_per_scrape_request: usize,
}
impl Default for BenchConfig {
fn default() -> Self {
Self {
num_rounds: 10,
num_threads: 2,
num_connect_requests: 5_000_000,
num_announce_requests: 2_000_000,
num_scrape_requests: 2_000_000,
num_hashes_per_scrape_request: 20,
}
}
}

View file

@ -0,0 +1,87 @@
use std::time::{Duration, Instant};
use crossbeam_channel::{Sender, Receiver};
use indicatif::ProgressIterator;
use rand::{Rng, SeedableRng, thread_rng, rngs::SmallRng};
use std::net::SocketAddr;
use aquatic_udp::common::*;
use aquatic_udp::config::Config;
use crate::common::*;
use crate::config::BenchConfig;
/// Benchmark the connect request handler.
///
/// Each round re-sends the same pre-generated connect requests in
/// pipeline-sized chunks, opportunistically draining responses without
/// blocking, then blocks until every response for the rounds so far has
/// arrived.
///
/// Returns the total number of connect responses received and the
/// elapsed wall-clock time.
pub fn bench_connect_handler(
    bench_config: &BenchConfig,
    aquatic_config: &Config,
    request_sender: &Sender<(Request, SocketAddr)>,
    response_receiver: &Receiver<(Response, SocketAddr)>,
) -> (usize, Duration) {
    let requests = create_requests(
        bench_config.num_connect_requests
    );

    // Chunk size: roughly what the worker threads can handle per iteration.
    let p = aquatic_config.handlers.max_requests_per_iter * bench_config.num_threads;

    let mut num_responses = 0usize;

    // Accumulator over response contents so response processing can't be
    // optimized away.
    let mut dummy: i64 = thread_rng().gen();

    let pb = create_progress_bar("Connect", bench_config.num_rounds as u64);

    // Start connect benchmark
    let before = Instant::now();

    for round in (0..bench_config.num_rounds).progress_with(pb) {
        for request_chunk in requests.chunks(p) {
            for (request, src) in request_chunk {
                request_sender.send((request.clone().into(), *src)).unwrap();
            }

            // Non-blocking drain of responses already produced.
            while let Ok((Response::Connect(r), _)) = response_receiver.try_recv() {
                num_responses += 1;

                dummy ^= r.connection_id.0;
            }
        }

        // Block until all responses for the rounds run so far have arrived.
        let total = bench_config.num_connect_requests * (round + 1);

        while num_responses < total {
            match response_receiver.recv() {
                Ok((Response::Connect(r), _)) => {
                    num_responses += 1;

                    dummy ^= r.connection_id.0;
                },
                Ok(_) => {
                    // Ignore responses of other types (e.g. errors).
                },
                Err(err) => {
                    // Previously this case was silently swallowed, which
                    // would busy-loop forever if all response senders were
                    // dropped. Fail loudly instead.
                    panic!("response channel disconnected: {}", err);
                }
            }
        }
    }

    let elapsed = before.elapsed();

    // Read `dummy` so the XOR accumulation has an observable effect.
    if dummy == 0 {
        println!("dummy dummy");
    }

    (num_responses, elapsed)
}
/// Generate `number` connect requests, each paired with a random IPv4
/// source address and port (connect requests need no prior connection).
pub fn create_requests(number: usize) -> Vec<(ConnectRequest, SocketAddr)> {
    let mut rng = SmallRng::from_rng(thread_rng()).unwrap();

    // Preallocate: the final length is known up front.
    let mut requests = Vec::with_capacity(number);

    for _ in 0..number {
        let request = ConnectRequest {
            transaction_id: TransactionId(rng.gen()),
        };
        let src = SocketAddr::from(([rng.gen(), rng.gen(), rng.gen(), rng.gen()], rng.gen()));

        requests.push((request, src));
    }

    requests
}

View file

@ -0,0 +1,146 @@
//! Benchmark announce and scrape handlers
//!
//! Example outputs:
//! ```
//! # Results over 20 rounds with 1 threads
//! Connect: 2 306 637 requests/second, 433.53 ns/request
//! Announce: 688 391 requests/second, 1452.66 ns/request
//! Scrape: 1 505 700 requests/second, 664.14 ns/request
//! ```
//! ```
//! # Results over 20 rounds with 2 threads
//! Connect: 3 472 434 requests/second, 287.98 ns/request
//! Announce: 739 371 requests/second, 1352.50 ns/request
//! Scrape: 1 845 253 requests/second, 541.93 ns/request
//! ```
use crossbeam_channel::unbounded;
use std::time::Duration;
use num_format::{Locale, ToFormattedString};
use rand::{Rng, thread_rng, rngs::SmallRng, SeedableRng};
use aquatic_udp::common::*;
use aquatic_udp::config::Config;
use aquatic_udp::handlers;
use cli_helpers::run_app_with_cli_and_config;
use config::BenchConfig;
mod announce;
mod common;
mod config;
mod connect;
mod scrape;
// Use mimalloc as the global allocator for the whole benchmark binary.
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
fn main() {
    // Parse CLI arguments / config into a BenchConfig, then hand off to `run`.
    run_app_with_cli_and_config::<BenchConfig>("aquatic benchmarker", run)
}
/// Spawn request handler workers, run the connect, announce and scrape
/// benchmarks in sequence, and print the results.
///
/// Connect runs first: announce/scrape build their requests from the
/// connections registered in `state.connections`.
pub fn run(bench_config: BenchConfig) {
    // Shared tracker state and handler configuration
    let state = State::new();
    let aquatic_config = Config::default();

    let (request_sender, request_receiver) = unbounded();
    let (response_sender, response_receiver) = unbounded();

    // Spawn the request handler workers the benchmarks feed.
    for _ in 0..bench_config.num_threads {
        let state = state.clone();
        let config = aquatic_config.clone();
        let request_receiver = request_receiver.clone();
        let response_sender = response_sender.clone();

        ::std::thread::spawn(move || {
            handlers::run_request_worker(
                state,
                config,
                request_receiver,
                response_sender,
            )
        });
    }

    // Run benchmarks
    let connect_results = connect::bench_connect_handler(
        &bench_config,
        &aquatic_config,
        &request_sender,
        &response_receiver,
    );

    let mut rng = SmallRng::from_rng(thread_rng()).unwrap();
    let info_hashes = create_info_hashes(&mut rng);

    let announce_results = announce::bench_announce_handler(
        &state,
        &bench_config,
        &aquatic_config,
        &request_sender,
        &response_receiver,
        &mut rng,
        &info_hashes,
    );
    let scrape_results = scrape::bench_scrape_handler(
        &state,
        &bench_config,
        &aquatic_config,
        &request_sender,
        &response_receiver,
        &mut rng,
        &info_hashes,
    );

    println!(
        "\n# Results over {} rounds with {} threads",
        bench_config.num_rounds,
        bench_config.num_threads,
    );

    print_results("Connect: ", connect_results.0, connect_results.1);
    print_results("Announce:", announce_results.0, announce_results.1);
    print_results("Scrape: ", scrape_results.0, scrape_results.1);
}
/// Print throughput (requests/second, locale-formatted) and mean latency
/// (ns/request) for one benchmark.
pub fn print_results(
    request_type: &str,
    num_responses: usize,
    duration: Duration,
) {
    // as_secs_f64 keeps sub-microsecond precision, unlike the former
    // as_micros()/1e6 computation which truncated first.
    let per_second = (
        (num_responses as f64 / duration.as_secs_f64()
    ) as usize).to_formatted_string(&Locale::se);

    let time_per_request = duration.as_nanos() as f64 / (num_responses as f64);

    println!(
        "{} {:>10} requests/second, {:>8.2} ns/request",
        request_type,
        per_second,
        time_per_request,
    );
}
/// Generate `common::NUM_INFO_HASHES` random info hashes.
fn create_info_hashes(rng: &mut impl Rng) -> Vec<InfoHash> {
    // collect() preallocates from the range's exact size hint.
    (0..common::NUM_INFO_HASHES)
        .map(|_| InfoHash(rng.gen()))
        .collect()
}

View file

@ -0,0 +1,130 @@
use std::net::SocketAddr;
use std::time::{Duration, Instant};
use crossbeam_channel::{Sender, Receiver};
use indicatif::ProgressIterator;
use rand::Rng;
use rand_distr::Pareto;
use aquatic_udp::common::*;
use aquatic_udp::config::Config;
use aquatic_udp_bench::pareto_usize;
use crate::common::*;
use crate::config::BenchConfig;
/// Benchmark the scrape request handler.
///
/// Each round re-sends the same pre-generated scrape requests in
/// pipeline-sized chunks, opportunistically draining responses without
/// blocking, then blocks until every response for the rounds so far has
/// arrived.
///
/// Returns the total number of scrape responses received and the elapsed
/// wall-clock time.
pub fn bench_scrape_handler(
    state: &State,
    bench_config: &BenchConfig,
    aquatic_config: &Config,
    request_sender: &Sender<(Request, SocketAddr)>,
    response_receiver: &Receiver<(Response, SocketAddr)>,
    rng: &mut impl Rng,
    info_hashes: &Vec<InfoHash>,
) -> (usize, Duration) {
    let requests = create_requests(
        state,
        rng,
        info_hashes,
        bench_config.num_scrape_requests,
        bench_config.num_hashes_per_scrape_request,
    );

    // Chunk size: roughly what the worker threads can handle per iteration.
    let p = aquatic_config.handlers.max_requests_per_iter * bench_config.num_threads;

    let mut num_responses = 0usize;

    // Accumulator over response contents so response processing can't be
    // optimized away.
    let mut dummy: i32 = rng.gen();

    let pb = create_progress_bar("Scrape", bench_config.num_rounds as u64);

    // Start benchmark
    let before = Instant::now();

    for round in (0..bench_config.num_rounds).progress_with(pb) {
        for request_chunk in requests.chunks(p) {
            for (request, src) in request_chunk {
                request_sender.send((request.clone().into(), *src)).unwrap();
            }

            // Non-blocking drain of responses already produced.
            while let Ok((Response::Scrape(r), _)) = response_receiver.try_recv() {
                num_responses += 1;

                if let Some(stat) = r.torrent_stats.last() {
                    dummy ^= stat.leechers.0;
                }
            }
        }

        // Block until all responses for the rounds run so far have arrived.
        let total = bench_config.num_scrape_requests * (round + 1);

        while num_responses < total {
            match response_receiver.recv() {
                Ok((Response::Scrape(r), _)) => {
                    num_responses += 1;

                    if let Some(stat) = r.torrent_stats.last() {
                        dummy ^= stat.leechers.0;
                    }
                },
                Ok(_) => {
                    // Ignore responses of other types (e.g. errors).
                },
                Err(err) => {
                    // Previously this case was silently swallowed, which
                    // would busy-loop forever if all response senders were
                    // dropped. Fail loudly instead.
                    panic!("response channel disconnected: {}", err);
                }
            }
        }
    }

    let elapsed = before.elapsed();

    // Read `dummy` so the XOR accumulation has an observable effect.
    if dummy == 0 {
        println!("dummy dummy");
    }

    (num_responses, elapsed)
}
/// Generate `number` scrape requests, each containing `hashes_per_request`
/// info hashes picked with Pareto-distributed popularity, paired with the
/// source address of an existing connection taken from `state`.
///
/// # Panics
///
/// Panics (with a clear message) if `state` holds fewer connections than
/// `number`, or if `info_hashes` is empty.
pub fn create_requests(
    state: &State,
    rng: &mut impl Rng,
    info_hashes: &[InfoHash],
    number: usize,
    hashes_per_request: usize,
) -> Vec<(ScrapeRequest, SocketAddr)> {
    let pareto = Pareto::new(1., PARETO_SHAPE).unwrap();

    // Explicit guard: an empty slice would underflow below.
    assert!(!info_hashes.is_empty(), "info_hashes must not be empty");
    let max_index = info_hashes.len() - 1;

    let connections = state.connections.lock();
    let connection_keys: Vec<ConnectionKey> = connections.keys()
        .take(number)
        .cloned()
        .collect();

    // Previously this was a bare index panic deep in the loop; fail up
    // front with an actionable message instead.
    assert!(
        connection_keys.len() >= number,
        "fewer connections ({}) than requested scrape requests ({}); \
         run the connect benchmark with at least as many requests first",
        connection_keys.len(),
        number
    );

    let mut requests = Vec::with_capacity(number);

    for key in connection_keys.iter().take(number) {
        let mut request_info_hashes = Vec::with_capacity(hashes_per_request);

        for _ in 0..hashes_per_request {
            // Skew popularity: low indices are picked far more often.
            let info_hash_index = pareto_usize(rng, pareto, max_index);
            request_info_hashes.push(info_hashes[info_hash_index]);
        }

        let request = ScrapeRequest {
            connection_id: key.connection_id,
            transaction_id: TransactionId(rng.gen()),
            info_hashes: request_info_hashes,
        };

        requests.push((request, key.socket_addr));
    }

    requests
}

View file

@ -0,0 +1,44 @@
use plotly::{Plot, Scatter, Layout};
use plotly::common::Title;
use plotly::layout::Axis;
use rand::{thread_rng, rngs::SmallRng, SeedableRng};
use rand_distr::Pareto;
use aquatic_udp_bench::pareto_usize;
/// Plot how Pareto-sampled info hash indices distribute for several
/// shape parameters.
fn main() {
    let mut plot = Plot::new();
    let mut rng = SmallRng::from_rng(thread_rng()).unwrap();

    const LEN: usize = 1_000;
    const MAX_VAL: usize = LEN - 1;

    for &pareto_shape in [0.1, 0.2, 0.3, 0.4, 0.5].iter() {
        let pareto = Pareto::new(1.0, pareto_shape).unwrap();

        // Histogram of sampled indices (note: 999_999 samples, range starts at 1)
        let mut counts = [0; LEN];
        for _ in 1..1_000_000 {
            counts[pareto_usize(&mut rng, pareto, MAX_VAL)] += 1;
        }

        let x_axis: Vec<usize> = (0..MAX_VAL).collect();
        plot.add_trace(
            Scatter::new(x_axis, counts.to_vec())
                .name(&format!("pareto shape = {}", pareto_shape)),
        );
    }

    plot.set_layout(
        Layout::new()
            .title(Title::new("Pareto distribution"))
            .xaxis(Axis::new().title(Title::new("Info hash index")))
            .yaxis(Axis::new().title(Title::new("Num requests"))),
    );
    plot.show();
}

View file

@ -0,0 +1,14 @@
use rand::Rng;
use rand_distr::Pareto;
/// Sample the Pareto distribution and map the result onto `0..=max`.
///
/// Samples are clamped to 101.0, then shifted/scaled from [1, 101] into
/// [0, 1] before being stretched over the index range, so small indices
/// come out far more often.
pub fn pareto_usize(
    rng: &mut impl Rng,
    pareto: Pareto<f64>,
    max: usize,
) -> usize {
    let sample: f64 = rng.sample(pareto);
    let normalized = (sample.min(101.0f64) - 1.0) / 100.0;
    (normalized * max as f64) as usize
}