Move all crates to new crates dir

This commit is contained in:
Joakim Frostegård 2023-10-18 23:53:41 +02:00
parent 3835da22ac
commit 9b032f7e24
128 changed files with 27 additions and 26 deletions

View file

@ -0,0 +1,35 @@
# Manifest for aquatic_http_load_test, a load-testing client for
# HTTP-over-TLS BitTorrent trackers.
[package]
name = "aquatic_http_load_test"
description = "BitTorrent (HTTP over TLS) load tester"
keywords = ["http", "benchmark", "peer-to-peer", "torrent", "bittorrent"]
# Shared metadata is inherited from the workspace root Cargo.toml
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
rust-version.workspace = true

[[bin]]
name = "aquatic_http_load_test"

[dependencies]
# Sibling workspace crates
aquatic_common = { workspace = true, features = ["glommio"] }
aquatic_http_protocol.workspace = true
aquatic_toml_config.workspace = true
# Third-party dependencies
anyhow = "1"
futures-lite = "1"
futures-rustls = "0.24"
hashbrown = "0.14"
glommio = "0.8"
log = "0.4"
mimalloc = { version = "0.1", default-features = false }
rand = { version = "0.8", features = ["small_rng"] }
rand_distr = "0.4"
rustls = { version = "0.21", default-features = false, features = ["logging", "dangerous_configuration"] } # TLS 1.2 disabled
serde = { version = "1", features = ["derive"] }

[dev-dependencies]
quickcheck = "1"
quickcheck_macros = "1"

View file

@ -0,0 +1,39 @@
use std::sync::{atomic::AtomicUsize, Arc};
use rand_distr::Gamma;
pub use aquatic_http_protocol::common::*;
pub use aquatic_http_protocol::request::*;
pub use aquatic_http_protocol::response::*;
/// A simulated BitTorrent peer used when generating tracker traffic.
#[derive(PartialEq, Eq, Clone)]
pub struct TorrentPeer {
    /// Torrent this peer announces for
    pub info_hash: InfoHash,
    /// Indices into the shared info hash list used when scraping.
    /// NOTE(review): field name keeps the original "indeces" spelling;
    /// renaming this public field would break external users.
    pub scrape_hash_indeces: Vec<usize>,
    pub peer_id: PeerId,
    pub port: u16,
}
/// Shared atomic counters, incremented by the socket workers and reset to
/// zero each reporting interval by the statistics monitor.
#[derive(Default)]
pub struct Statistics {
    /// Requests sent
    pub requests: AtomicUsize,
    /// Peers received in announce responses (reporting for this counter is
    /// currently commented out in the statistics monitor)
    pub response_peers: AtomicUsize,
    pub responses_announce: AtomicUsize,
    pub responses_scrape: AtomicUsize,
    pub responses_failure: AtomicUsize,
    pub bytes_sent: AtomicUsize,
    pub bytes_received: AtomicUsize,
}
/// State shared between all load-test workers; cloning is cheap since every
/// field is behind an `Arc`.
#[derive(Clone)]
pub struct LoadTestState {
    /// Pre-generated torrent info hashes that requests pick from
    pub info_hashes: Arc<Vec<InfoHash>>,
    pub statistics: Arc<Statistics>,
    /// Gamma distribution used to skew torrent popularity
    pub gamma: Arc<Gamma<f64>>,
}
/// Kind of tracker request to generate.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum RequestType {
    Announce,
    Scrape,
}

View file

@ -0,0 +1,89 @@
use std::net::SocketAddr;
use aquatic_common::cli::LogLevel;
use aquatic_common::cpu_pinning::desc::CpuPinningConfigDesc;
use aquatic_toml_config::TomlConfig;
use serde::Deserialize;
/// aquatic_http_load_test configuration
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    /// Address of the tracker under test
    pub server_address: SocketAddr,
    pub log_level: LogLevel,
    /// Number of socket worker executors to spawn
    pub num_workers: usize,
    /// Maximum number of connections to keep open
    pub num_connections: usize,
    /// How often to check if num_connections connections are open, and
    /// open a new one otherwise. A value of 0 means that connections are
    /// opened as quickly as possible, which is useful when the tracker
    /// does not keep connections alive.
    pub connection_creation_interval_ms: u64,
    /// Announce/scrape url suffix. Use `/my_token/` to get `/announce/my_token/`
    pub url_suffix: String,
    /// Test duration in seconds; 0 means run until interrupted
    pub duration: usize,
    /// Keep connections open after a request/response round trip
    pub keep_alive: bool,
    pub torrents: TorrentConfig,
    pub cpu_pinning: CpuPinningConfigDesc,
}
impl aquatic_common::cli::Config for Config {
    /// Expose the configured log level to the shared CLI runner
    fn get_log_level(&self) -> Option<LogLevel> {
        Some(self.log_level)
    }
}
impl Default for Config {
fn default() -> Self {
Self {
server_address: "127.0.0.1:3000".parse().unwrap(),
log_level: LogLevel::Error,
num_workers: 1,
num_connections: 128,
connection_creation_interval_ms: 10,
url_suffix: "".into(),
duration: 0,
keep_alive: true,
torrents: TorrentConfig::default(),
cpu_pinning: Default::default(),
}
}
}
/// Settings controlling the simulated torrent swarm and request mix.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct TorrentConfig {
    /// Number of torrent info hashes to generate
    pub number_of_torrents: usize,
    /// Probability that a generated peer is a seeder
    pub peer_seeder_probability: f64,
    /// Probability that a generated request is a announce request, as part
    /// of sum of the various weight arguments.
    pub weight_announce: usize,
    /// Probability that a generated request is a scrape request, as part
    /// of sum of the various weight arguments.
    pub weight_scrape: usize,
    /// Peers choose torrents according to this Gamma distribution shape
    pub torrent_gamma_shape: f64,
    /// Peers choose torrents according to this Gamma distribution scale
    pub torrent_gamma_scale: f64,
}
impl Default for TorrentConfig {
fn default() -> Self {
Self {
number_of_torrents: 10_000,
peer_seeder_probability: 0.25,
weight_announce: 5,
weight_scrape: 0,
torrent_gamma_shape: 0.2,
torrent_gamma_scale: 100.0,
}
}
}
#[cfg(test)]
mod tests {
    use super::Config;

    // Macro-generated test; presumably round-trips the default Config
    // through TOML serialization and deserialization
    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}

View file

@ -0,0 +1,204 @@
use std::sync::{atomic::Ordering, Arc};
use std::thread;
use std::time::{Duration, Instant};
use ::glommio::LocalExecutorBuilder;
use aquatic_common::cpu_pinning::glommio::{get_worker_placement, set_affinity_for_util_worker};
use aquatic_common::cpu_pinning::WorkerIndex;
use rand::prelude::*;
use rand_distr::Gamma;
mod common;
mod config;
mod network;
mod utils;
use common::*;
use config::*;
use network::*;
// Use mimalloc as the process-wide allocator
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

/// Multiply bytes during a second with this to get Mbit/s
const MBITS_FACTOR: f64 = 1.0 / ((1024.0 * 1024.0) / 8.0);
/// Entry point: parse CLI arguments and config file, then hand off to `run`.
pub fn main() {
    aquatic_common::cli::run_app_with_cli_and_config::<Config>(
        "aquatic_http_load_test: BitTorrent load tester",
        env!("CARGO_PKG_VERSION"),
        run,
        None,
    )
}
/// Validate the config, build the shared test state (info hashes, statistics,
/// Gamma distribution), spawn one glommio executor per worker, then block the
/// main thread printing statistics until the test duration elapses.
fn run(config: Config) -> ::anyhow::Result<()> {
    // With both weights zero, weighted request selection would be impossible;
    // fail fast before spawning anything.
    if config.torrents.weight_announce + config.torrents.weight_scrape == 0 {
        panic!("Error: at least one weight must be larger than zero.");
    }

    println!("Starting client with config: {:#?}", config);

    // Pre-generate the full set of random torrent info hashes, shared
    // read-only by all workers
    let mut info_hashes = Vec::with_capacity(config.torrents.number_of_torrents);

    let mut rng = SmallRng::from_entropy();

    for _ in 0..config.torrents.number_of_torrents {
        info_hashes.push(InfoHash(rng.gen()));
    }

    // Distribution used to pick torrents, making some much more popular
    // than others
    let gamma = Gamma::new(
        config.torrents.torrent_gamma_shape,
        config.torrents.torrent_gamma_scale,
    )
    .unwrap();

    let state = LoadTestState {
        info_hashes: Arc::new(info_hashes),
        statistics: Arc::new(Statistics::default()),
        gamma: Arc::new(gamma),
    };

    let tls_config = create_tls_config().unwrap();

    // One single-threaded glommio executor per worker; handles are dropped
    // rather than joined, since monitor_statistics below only returns once
    // the configured duration has elapsed.
    for i in 0..config.num_workers {
        let config = config.clone();
        let tls_config = tls_config.clone();
        let state = state.clone();

        let placement = get_worker_placement(
            &config.cpu_pinning,
            config.num_workers,
            0,
            WorkerIndex::SocketWorker(i),
        )?;

        LocalExecutorBuilder::new(placement)
            .name("load-test")
            .spawn(move || async move {
                run_socket_thread(config, tls_config, state).await.unwrap();
            })
            .unwrap();
    }

    // Optionally pin the main (statistics) thread too
    if config.cpu_pinning.active {
        set_affinity_for_util_worker(&config.cpu_pinning, config.num_workers, 0)?;
    }

    monitor_statistics(state, &config);

    Ok(())
}
/// Print request/response/bandwidth statistics every five seconds and, if
/// `config.duration` is non-zero, break out with a summary report once the
/// configured test duration has elapsed.
///
/// Each counter is atomically read and reset to zero via `swap(0, ..)` (the
/// original `fetch_and(0, ..)` computed the same thing — `x & 0 == 0` and the
/// previous value is returned — but `swap` states the read-and-clear intent
/// directly), so each report covers only the preceding interval.
fn monitor_statistics(state: LoadTestState, config: &Config) {
    let start_time = Instant::now();
    let mut report_avg_response_vec: Vec<f64> = Vec::new();

    // Reporting interval in seconds
    let interval = 5;
    let interval_f64 = interval as f64;

    loop {
        thread::sleep(Duration::from_secs(interval));

        let statistics = state.statistics.as_ref();

        let responses_announce = statistics.responses_announce.swap(0, Ordering::Relaxed) as f64;
        // let response_peers = statistics.response_peers
        //     .fetch_and(0, Ordering::SeqCst) as f64;

        let requests_per_second =
            statistics.requests.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_scrape_per_second =
            statistics.responses_scrape.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_failure_per_second =
            statistics.responses_failure.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let bytes_sent_per_second =
            statistics.bytes_sent.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let bytes_received_per_second =
            statistics.bytes_received.swap(0, Ordering::Relaxed) as f64 / interval_f64;

        let responses_announce_per_second = responses_announce / interval_f64;

        let responses_per_second = responses_announce_per_second
            + responses_scrape_per_second
            + responses_failure_per_second;

        // Remember per-interval throughput for the final average
        report_avg_response_vec.push(responses_per_second);

        println!();
        println!("Requests out: {:.2}/second", requests_per_second);
        println!("Responses in: {:.2}/second", responses_per_second);
        println!(
            "  - Announce responses: {:.2}",
            responses_announce_per_second
        );
        println!("  - Scrape responses:   {:.2}", responses_scrape_per_second);
        println!(
            "  - Failure responses:  {:.2}",
            responses_failure_per_second
        );
        //println!("Peers per announce response: {:.2}", response_peers / responses_announce);
        println!(
            "Bandwidth out: {:.2}Mbit/s",
            bytes_sent_per_second * MBITS_FACTOR
        );
        println!(
            "Bandwidth in:  {:.2}Mbit/s",
            bytes_received_per_second * MBITS_FACTOR
        );

        let time_elapsed = start_time.elapsed();
        let duration = Duration::from_secs(config.duration as u64);

        // duration == 0 means run forever; otherwise stop once elapsed
        if config.duration != 0 && time_elapsed >= duration {
            let report_len = report_avg_response_vec.len() as f64;
            let report_sum: f64 = report_avg_response_vec.into_iter().sum();
            // report_len >= 1 here, since we pushed at least once this iteration
            let report_avg: f64 = report_sum / report_len;

            println!(
                concat!(
                    "\n# aquatic load test report\n\n",
                    "Test ran for {} seconds.\n",
                    "Average responses per second: {:.2}\n\nConfig: {:#?}\n"
                ),
                time_elapsed.as_secs(),
                report_avg,
                config
            );

            break;
        }
    }
}
/// Certificate verifier that accepts any server certificate without checking.
///
/// SECURITY: this disables TLS certificate validation entirely. That is only
/// acceptable because this is a load-testing client, typically pointed at a
/// tracker under test with a self-signed certificate.
struct FakeCertificateVerifier;

impl rustls::client::ServerCertVerifier for FakeCertificateVerifier {
    fn verify_server_cert(
        &self,
        _end_entity: &rustls::Certificate,
        _intermediates: &[rustls::Certificate],
        _server_name: &rustls::ServerName,
        _scts: &mut dyn Iterator<Item = &[u8]>,
        _ocsp_response: &[u8],
        _now: std::time::SystemTime,
    ) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
        // Unconditionally report the certificate as valid
        Ok(rustls::client::ServerCertVerified::assertion())
    }
}
/// Build a rustls client config with an empty root store and certificate
/// verification disabled via `FakeCertificateVerifier` (this requires the
/// "dangerous_configuration" rustls feature enabled in Cargo.toml).
fn create_tls_config() -> anyhow::Result<Arc<rustls::ClientConfig>> {
    let mut config = rustls::ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(rustls::RootCertStore::empty())
        .with_no_client_auth();

    config
        .dangerous()
        .set_certificate_verifier(Arc::new(FakeCertificateVerifier));

    Ok(Arc::new(config))
}

View file

@ -0,0 +1,254 @@
use std::{
cell::RefCell,
convert::TryInto,
io::Cursor,
rc::Rc,
sync::{atomic::Ordering, Arc},
time::Duration,
};
use aquatic_http_protocol::response::Response;
use futures_lite::{AsyncReadExt, AsyncWriteExt};
use futures_rustls::{client::TlsStream, TlsConnector};
use glommio::net::TcpStream;
use glommio::{prelude::*, timer::TimerActionRepeat};
use rand::{prelude::SmallRng, SeedableRng};
use crate::{common::LoadTestState, config::Config, utils::create_random_request};
/// Per-worker connection driver, run on a single-threaded glommio executor.
///
/// With `connection_creation_interval_ms == 0`, connections are opened and
/// run to completion back-to-back in a loop (useful for trackers that do not
/// keep connections alive). Otherwise a repeating timer opens a connection
/// each interval while the active count is below `num_connections`.
pub async fn run_socket_thread(
    config: Config,
    tls_config: Arc<rustls::ClientConfig>,
    load_test_state: LoadTestState,
) -> anyhow::Result<()> {
    let config = Rc::new(config);
    // Count of currently running connections on this executor, shared with
    // each connection task
    let num_active_connections = Rc::new(RefCell::new(0usize));
    let rng = Rc::new(RefCell::new(SmallRng::from_entropy()));

    let interval = config.connection_creation_interval_ms;

    if interval == 0 {
        loop {
            if *num_active_connections.borrow() < config.num_connections {
                // Connection runs to completion inline before the next
                // iteration; errors are logged and the loop retries
                if let Err(err) = Connection::run(
                    config.clone(),
                    tls_config.clone(),
                    load_test_state.clone(),
                    num_active_connections.clone(),
                    rng.clone(),
                )
                .await
                {
                    ::log::error!("connection creation error: {:?}", err);
                }
            }
            // NOTE(review): when the limit branch is not taken this loop
            // spins without awaiting. Since connections run inline here, the
            // count is back to 0 between iterations, so the spin presumably
            // only occurs with num_connections == 0 — worth confirming.
        }
    } else {
        let interval = Duration::from_millis(interval);

        // Timer re-schedules itself as long as the callback returns Some(..)
        TimerActionRepeat::repeat(move || {
            periodically_open_connections(
                config.clone(),
                interval,
                tls_config.clone(),
                load_test_state.clone(),
                num_active_connections.clone(),
                rng.clone(),
            )
        });
    }

    // Keep this executor alive forever; the actual work happens in the
    // timer-spawned tasks above
    futures_lite::future::pending::<bool>().await;

    Ok(())
}
/// Timer callback: spawn a new connection task if the number of active
/// connections is below the configured limit.
///
/// Returning `Some(interval)` tells `TimerActionRepeat` to invoke this again
/// after `interval`.
async fn periodically_open_connections(
    config: Rc<Config>,
    interval: Duration,
    tls_config: Arc<rustls::ClientConfig>,
    load_test_state: LoadTestState,
    num_active_connections: Rc<RefCell<usize>>,
    rng: Rc<RefCell<SmallRng>>,
) -> Option<Duration> {
    if *num_active_connections.borrow() < config.num_connections {
        // Detached: the task runs independently; errors are only logged
        spawn_local(async move {
            if let Err(err) = Connection::run(
                config,
                tls_config,
                load_test_state,
                num_active_connections,
                rng.clone(),
            )
            .await
            {
                ::log::error!("connection creation error: {:?}", err);
            }
        })
        .detach();
    }

    Some(interval)
}
/// A single TLS connection to the tracker, repeatedly sending randomized
/// announce/scrape requests and reading the responses.
struct Connection {
    config: Rc<Config>,
    load_test_state: LoadTestState,
    // RNG shared with the other connections on this executor
    rng: Rc<RefCell<SmallRng>>,
    stream: TlsStream<TcpStream>,
    // Fixed-size buffer used both for serializing requests and for
    // accumulating response bytes
    buffer: [u8; 2048],
}
impl Connection {
    /// Open a TCP + TLS connection to the configured server and run the
    /// request/response loop until it errors or (with keep_alive disabled)
    /// completes one round trip.
    ///
    /// `num_active_connections` is incremented for the lifetime of the
    /// connection; loop errors are logged and swallowed, so this only
    /// returns `Err` for connect/handshake failures.
    async fn run(
        config: Rc<Config>,
        tls_config: Arc<rustls::ClientConfig>,
        load_test_state: LoadTestState,
        num_active_connections: Rc<RefCell<usize>>,
        rng: Rc<RefCell<SmallRng>>,
    ) -> anyhow::Result<()> {
        let stream = TcpStream::connect(config.server_address)
            .await
            .map_err(|err| anyhow::anyhow!("connect: {:?}", err))?;
        // SNI name is hard-coded; the fake certificate verifier accepts any
        // certificate regardless
        let stream = TlsConnector::from(tls_config)
            .connect("example.com".try_into().unwrap(), stream)
            .await?;

        let mut connection = Connection {
            config,
            load_test_state,
            rng,
            stream,
            buffer: [0; 2048],
        };

        *num_active_connections.borrow_mut() += 1;

        if let Err(err) = connection.run_connection_loop().await {
            ::log::info!("connection error: {:?}", err);
        }

        *num_active_connections.borrow_mut() -= 1;

        Ok(())
    }

    /// Alternate sending a request and reading its response; loop forever
    /// when keep-alive is enabled, otherwise stop after one round trip.
    async fn run_connection_loop(&mut self) -> anyhow::Result<()> {
        loop {
            self.send_request().await?;
            self.read_response().await?;

            if !self.config.keep_alive {
                break Ok(());
            }
        }
    }

    /// Serialize a random announce/scrape request into the connection buffer
    /// and write it to the TLS stream, updating request/bytes-sent counters.
    async fn send_request(&mut self) -> anyhow::Result<()> {
        let request = create_random_request(
            &self.config,
            &self.load_test_state,
            &mut self.rng.borrow_mut(),
        );

        let mut cursor = Cursor::new(&mut self.buffer[..]);

        request.write(&mut cursor, self.config.url_suffix.as_bytes())?;

        let cursor_position = cursor.position() as usize;

        // NOTE(review): a single write() may be partial; the statistics
        // record the bytes actually accepted by the stream
        let bytes_sent = self
            .stream
            .write(&cursor.into_inner()[..cursor_position])
            .await?;

        self.stream.flush().await?;

        self.load_test_state
            .statistics
            .bytes_sent
            .fetch_add(bytes_sent, Ordering::Relaxed);
        self.load_test_state
            .statistics
            .requests
            .fetch_add(1, Ordering::Relaxed);

        Ok(())
    }

    /// Read from the stream until a complete, parseable HTTP response has
    /// accumulated in the buffer, then update the response statistics.
    async fn read_response(&mut self) -> anyhow::Result<()> {
        let mut buffer_position = 0;

        loop {
            let bytes_read = self
                .stream
                .read(&mut self.buffer[buffer_position..])
                .await?;

            // EOF. NOTE(review): if a response exceeds the 2048-byte buffer,
            // read() is handed an empty slice, also returns 0, and the
            // response is silently dropped — worth confirming this cannot
            // happen with real tracker responses.
            if bytes_read == 0 {
                break;
            }

            buffer_position += bytes_read;

            let interesting_bytes = &self.buffer[..buffer_position];

            // Locate the end of the HTTP headers ("\r\n\r\n"); the bencoded
            // body starts immediately after
            let mut opt_body_start_index = None;

            for (i, chunk) in interesting_bytes.windows(4).enumerate() {
                if chunk == b"\r\n\r\n" {
                    opt_body_start_index = Some(i + 4);

                    break;
                }
            }

            if let Some(body_start_index) = opt_body_start_index {
                match Response::from_bytes(&interesting_bytes[body_start_index..]) {
                    Ok(response) => {
                        match response {
                            Response::Announce(_) => {
                                self.load_test_state
                                    .statistics
                                    .responses_announce
                                    .fetch_add(1, Ordering::Relaxed);
                            }
                            Response::Scrape(_) => {
                                self.load_test_state
                                    .statistics
                                    .responses_scrape
                                    .fetch_add(1, Ordering::Relaxed);
                            }
                            Response::Failure(response) => {
                                self.load_test_state
                                    .statistics
                                    .responses_failure
                                    .fetch_add(1, Ordering::Relaxed);

                                println!("failure response: reason: {}", response.failure_reason);
                            }
                        }

                        // Counts headers + body of the whole buffered response
                        self.load_test_state
                            .statistics
                            .bytes_received
                            .fetch_add(interesting_bytes.len(), Ordering::Relaxed);

                        break;
                    }
                    Err(err) => {
                        // Likely an incomplete body: log and keep reading
                        ::log::warn!(
                            "deserialize response error with {} bytes read: {:?}, text: {}",
                            buffer_position,
                            err,
                            interesting_bytes.escape_ascii()
                        );
                    }
                }
            }
        }

        Ok(())
    }
}

View file

@ -0,0 +1,81 @@
use std::sync::Arc;
use rand::distributions::WeightedIndex;
use rand::prelude::*;
use rand_distr::Gamma;
use crate::common::*;
use crate::config::*;
/// Build a random announce or scrape request, choosing the request type
/// according to the configured announce/scrape weights.
pub fn create_random_request(
    config: &Config,
    state: &LoadTestState,
    rng: &mut SmallRng,
) -> Request {
    let announce_weight = config.torrents.weight_announce as u32;
    let scrape_weight = config.torrents.weight_scrape as u32;

    // Index 0 selects announce, index 1 selects scrape
    let dist = WeightedIndex::new(&[announce_weight, scrape_weight])
        .expect("random request weighted index");

    if dist.sample(rng) == 0 {
        create_announce_request(config, state, rng)
    } else {
        create_scrape_request(config, state, rng)
    }
}
/// Build an announce request for a randomly selected torrent.
///
/// A peer is a seeder with the configured probability; seeders announce
/// completion with nothing left, leechers announce a fresh start with 50
/// bytes remaining.
#[inline]
fn create_announce_request(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> Request {
    let (event, bytes_left) = {
        if rng.gen_bool(config.torrents.peer_seeder_probability) {
            (AnnounceEvent::Completed, 0)
        } else {
            (AnnounceEvent::Started, 50)
        }
    };

    let info_hash_index = select_info_hash_index(config, &state, rng);

    Request::Announce(AnnounceRequest {
        info_hash: state.info_hashes[info_hash_index],
        // Fresh random identity and port per request
        peer_id: PeerId(rng.gen()),
        bytes_left,
        event,
        key: None,
        numwant: None,
        port: rng.gen(),
        bytes_uploaded: 0,
        bytes_downloaded: 0,
    })
}
#[inline]
fn create_scrape_request(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> Request {
let mut scrape_hashes = Vec::with_capacity(5);
for _ in 0..5 {
let info_hash_index = select_info_hash_index(config, &state, rng);
scrape_hashes.push(state.info_hashes[info_hash_index]);
}
Request::Scrape(ScrapeRequest {
info_hashes: scrape_hashes,
})
}
/// Pick an index into the shared info hash list using the configured Gamma
/// distribution, so that some torrents are far more popular than others.
/// NOTE(review): underflows if number_of_torrents is 0 — presumably ruled
/// out by configuration; worth confirming.
#[inline]
fn select_info_hash_index(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> usize {
    gamma_usize(rng, &state.gamma, config.torrents.number_of_torrents - 1)
}
/// Map a sample from `gamma` to an index in `0..=max`.
///
/// The raw sample is capped at 101.0, shifted down by 1.0 and scaled by
/// 1/100, so samples in roughly 1.0..=101.0 span the full index range.
/// Samples below 1.0 yield a negative fraction, which the `as usize` cast
/// saturates to 0; samples at or above 101.0 all map to `max`.
///
/// Takes `&Gamma<f64>` rather than the previous `&Arc<Gamma<f64>>`: the
/// function does not care how the distribution is shared, and callers
/// holding an `Arc` still compile unchanged via deref coercion.
#[inline]
fn gamma_usize(rng: &mut impl Rng, gamma: &Gamma<f64>, max: usize) -> usize {
    let p: f64 = gamma.sample(rng);
    let p = (p.min(101.0f64) - 1.0) / 100.0;

    (p * max as f64) as usize
}