Move all crates to new crates dir

This commit is contained in:
Joakim Frostegård 2023-10-18 23:53:41 +02:00
parent 3835da22ac
commit 9b032f7e24
128 changed files with 27 additions and 26 deletions

View file

@ -0,0 +1,30 @@
use std::sync::{atomic::AtomicUsize, Arc};
use rand_distr::Gamma;
pub use aquatic_ws_protocol::*;
/// Atomic counters shared between all load test workers and the
/// statistics monitor thread.
#[derive(Default)]
pub struct Statistics {
    // Requests sent to the tracker
    pub requests: AtomicUsize,
    // NOTE(review): never incremented in this crate as committed (only a
    // commented-out read exists in main.rs) — presumably peers received
    // in announce responses; confirm before relying on it
    pub response_peers: AtomicUsize,
    // Responses received, by message type
    pub responses_announce: AtomicUsize,
    pub responses_offer: AtomicUsize,
    pub responses_answer: AtomicUsize,
    pub responses_scrape: AtomicUsize,
    pub responses_error: AtomicUsize,
    // Currently open connections across all workers
    pub connections: AtomicUsize,
}
/// State shared between all load test workers (cheap to clone: only
/// `Arc` handles are copied).
#[derive(Clone)]
pub struct LoadTestState {
    // Pregenerated info hashes that generated requests pick from
    pub info_hashes: Arc<Vec<InfoHash>>,
    // Shared counters, drained periodically by the monitor thread
    pub statistics: Arc<Statistics>,
    // Gamma distribution used to skew which torrents peers choose
    pub gamma: Arc<Gamma<f64>>,
}
/// Kinds of requests the load tester can generate, chosen according to
/// the weights configured in the torrent config.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum RequestType {
    Announce,
    Scrape,
}

View file

@ -0,0 +1,83 @@
use std::net::SocketAddr;
use aquatic_common::cli::LogLevel;
use aquatic_common::cpu_pinning::desc::CpuPinningConfigDesc;
use aquatic_toml_config::TomlConfig;
use serde::Deserialize;
/// aquatic_ws_load_test configuration
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    // Address of the tracker server to connect to
    pub server_address: SocketAddr,
    pub log_level: LogLevel,
    // Number of socket worker threads
    pub num_workers: usize,
    // Number of connections each worker keeps open
    pub num_connections_per_worker: usize,
    // Delay between connection creation attempts, per worker
    pub connection_creation_interval_ms: u64,
    // Test duration in seconds; 0 means run indefinitely
    pub duration: usize,
    // If true, only start collecting report samples once all
    // configured connections are active
    pub measure_after_max_connections_reached: bool,
    pub torrents: TorrentConfig,
    pub cpu_pinning: CpuPinningConfigDesc,
}
/// Hook into the shared CLI runner: expose the configured log level.
impl aquatic_common::cli::Config for Config {
    fn get_log_level(&self) -> Option<LogLevel> {
        // Level is always configured, so always return a value
        Option::from(self.log_level)
    }
}
impl Default for Config {
fn default() -> Self {
Self {
server_address: "127.0.0.1:3000".parse().unwrap(),
log_level: LogLevel::Warn,
num_workers: 1,
num_connections_per_worker: 16,
connection_creation_interval_ms: 10,
duration: 0,
measure_after_max_connections_reached: true,
torrents: TorrentConfig::default(),
cpu_pinning: Default::default(),
}
}
}
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct TorrentConfig {
    // Number of offers included in each announce request
    pub offers_per_request: usize,
    // Number of distinct (randomly generated) info hashes
    pub number_of_torrents: usize,
    /// Probability that a generated peer is a seeder
    pub peer_seeder_probability: f64,
    /// Probability that a generated request is a announce request, as part
    /// of sum of the various weight arguments.
    pub weight_announce: usize,
    /// Probability that a generated request is a scrape request, as part
    /// of sum of the various weight arguments.
    pub weight_scrape: usize,
    /// Peers choose torrents according to this Gamma distribution shape
    pub torrent_gamma_shape: f64,
    /// Peers choose torrents according to this Gamma distribution scale
    pub torrent_gamma_scale: f64,
}
impl Default for TorrentConfig {
fn default() -> Self {
Self {
offers_per_request: 10,
number_of_torrents: 10_000,
peer_seeder_probability: 0.25,
weight_announce: 5,
weight_scrape: 0,
torrent_gamma_shape: 0.2,
torrent_gamma_scale: 100.0,
}
}
}
#[cfg(test)]
mod tests {
    use super::Config;

    // Round-trip test generated by aquatic_toml_config: serializing the
    // default Config to TOML and deserializing it back must round-trip.
    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}

View file

@ -0,0 +1,221 @@
use std::sync::{atomic::Ordering, Arc};
use std::thread;
use std::time::{Duration, Instant};
use aquatic_common::cpu_pinning::glommio::{get_worker_placement, set_affinity_for_util_worker};
use aquatic_common::cpu_pinning::WorkerIndex;
use glommio::LocalExecutorBuilder;
use rand::prelude::*;
use rand_distr::Gamma;
mod common;
mod config;
mod network;
mod utils;
use common::*;
use config::*;
use network::*;
// Use mimalloc as the process-wide allocator
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
/// Entry point: parse CLI arguments and configuration, then hand off to
/// [`run`].
pub fn main() {
    let title = "aquatic_ws_load_test: WebTorrent load tester";
    let version = env!("CARGO_PKG_VERSION");

    aquatic_common::cli::run_app_with_cli_and_config::<Config>(title, version, run, None)
}
/// Validate the config, spawn one glommio socket worker per configured
/// worker, then monitor statistics on the current thread until the test
/// duration elapses.
///
/// # Errors
///
/// Returns an error when all request-type weights are zero, when the
/// gamma distribution parameters are invalid, when TLS setup fails, or
/// when worker placement / CPU pinning fails.
fn run(config: Config) -> ::anyhow::Result<()> {
    // All weights zero would make WeightedIndex::new fail for every
    // generated request; surface it as a proper error instead of panicking
    if config.torrents.weight_announce + config.torrents.weight_scrape == 0 {
        return Err(::anyhow::anyhow!(
            "at least one weight must be larger than zero"
        ));
    }

    println!("Starting client with config: {:#?}", config);

    // Pregenerate random info hashes shared by all workers
    let mut info_hashes = Vec::with_capacity(config.torrents.number_of_torrents);
    let mut rng = SmallRng::from_entropy();

    for _ in 0..config.torrents.number_of_torrents {
        info_hashes.push(InfoHash(rng.gen()));
    }

    // Shape/scale come from user config: propagate invalid values as an
    // error instead of unwrapping
    let gamma = Gamma::new(
        config.torrents.torrent_gamma_shape,
        config.torrents.torrent_gamma_scale,
    )
    .map_err(|err| ::anyhow::anyhow!("invalid torrent gamma parameters: {:?}", err))?;

    let state = LoadTestState {
        info_hashes: Arc::new(info_hashes),
        statistics: Arc::new(Statistics::default()),
        gamma: Arc::new(gamma),
    };

    let tls_config = create_tls_config()?;

    for i in 0..config.num_workers {
        let config = config.clone();
        let tls_config = tls_config.clone();
        let state = state.clone();

        let placement = get_worker_placement(
            &config.cpu_pinning,
            config.num_workers,
            0,
            WorkerIndex::SocketWorker(i),
        )?;

        // Each worker runs on its own single-threaded glommio executor.
        // Handles are intentionally detached: workers run until the
        // process exits.
        LocalExecutorBuilder::new(placement)
            .name("load-test")
            .spawn(move || async move {
                run_socket_thread(config, tls_config, state).await.unwrap();
            })
            .unwrap();
    }

    if config.cpu_pinning.active {
        set_affinity_for_util_worker(&config.cpu_pinning, config.num_workers, 0)?;
    }

    // Blocks until the configured duration has elapsed (or forever when
    // duration is 0)
    monitor_statistics(state, &config);

    Ok(())
}
/// Certificate verifier that unconditionally accepts any server
/// certificate. This disables all TLS certificate validation and is only
/// acceptable because this binary is a load tester talking to a server
/// the operator controls.
struct FakeCertificateVerifier;

impl rustls::client::ServerCertVerifier for FakeCertificateVerifier {
    fn verify_server_cert(
        &self,
        _end_entity: &rustls::Certificate,
        _intermediates: &[rustls::Certificate],
        _server_name: &rustls::ServerName,
        _scts: &mut dyn Iterator<Item = &[u8]>,
        _ocsp_response: &[u8],
        _now: std::time::SystemTime,
    ) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
        // Always report the certificate as verified, ignoring all inputs
        Ok(rustls::client::ServerCertVerified::assertion())
    }
}
/// Build a rustls client config whose certificate verification is
/// replaced by [`FakeCertificateVerifier`], i.e. any server certificate
/// (including self-signed) is accepted.
fn create_tls_config() -> anyhow::Result<Arc<rustls::ClientConfig>> {
    let mut client_config = rustls::ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(rustls::RootCertStore::empty())
        .with_no_client_auth();

    let verifier = Arc::new(FakeCertificateVerifier);

    client_config.dangerous().set_certificate_verifier(verifier);

    Ok(Arc::new(client_config))
}
/// Print throughput statistics every five seconds and, once the
/// configured duration has elapsed, print a final report and return.
///
/// Each counter is drained with `swap(0, ..)`, which atomically reads
/// the value and resets it to zero, so every interval reports only the
/// activity since the previous report. (The original used
/// `fetch_and(0, ..)`, which is equivalent but obscure.)
fn monitor_statistics(state: LoadTestState, config: &Config) {
    let start_time = Instant::now();

    // Set once all configured connections are active; only relevant when
    // `measure_after_max_connections_reached` is enabled
    let mut time_max_connections_reached = None;

    // Per-interval responses/second samples for the final report average
    let mut report_avg_response_vec: Vec<f64> = Vec::new();

    let interval = 5;
    let interval_f64 = interval as f64;

    loop {
        thread::sleep(Duration::from_secs(interval));

        let statistics = state.statistics.as_ref();

        let responses_announce = statistics.responses_announce.swap(0, Ordering::Relaxed) as f64;
        let requests_per_second =
            statistics.requests.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_offer_per_second =
            statistics.responses_offer.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_answer_per_second =
            statistics.responses_answer.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_scrape_per_second =
            statistics.responses_scrape.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_error_per_second =
            statistics.responses_error.swap(0, Ordering::Relaxed) as f64 / interval_f64;
        let responses_announce_per_second = responses_announce / interval_f64;

        let connections = statistics.connections.load(Ordering::Relaxed);

        let responses_per_second = responses_announce_per_second
            + responses_offer_per_second
            + responses_answer_per_second
            + responses_scrape_per_second
            + responses_error_per_second;

        // Collect report samples either always (when measurement isn't
        // gated on max connections) or once the connection target was hit
        if !config.measure_after_max_connections_reached || time_max_connections_reached.is_some() {
            report_avg_response_vec.push(responses_per_second);
        } else if connections >= config.num_workers * config.num_connections_per_worker {
            time_max_connections_reached = Some(Instant::now());

            println!();
            println!("Max connections reached");
            println!();
        }

        println!();
        println!("Requests out: {:.2}/second", requests_per_second);
        println!("Responses in: {:.2}/second", responses_per_second);
        println!(
            " - Announce responses: {:.2}",
            responses_announce_per_second
        );
        println!(" - Offer responses: {:.2}", responses_offer_per_second);
        println!(" - Answer responses: {:.2}", responses_answer_per_second);
        println!(" - Scrape responses: {:.2}", responses_scrape_per_second);
        println!(" - Error responses: {:.2}", responses_error_per_second);
        println!("Active connections: {}", connections);

        // duration == 0 means run forever; otherwise measure either from
        // the moment max connections were reached or from process start
        if config.measure_after_max_connections_reached {
            if let Some(start) = time_max_connections_reached {
                let time_elapsed = start.elapsed();

                if config.duration != 0
                    && time_elapsed >= Duration::from_secs(config.duration as u64)
                {
                    report(config, report_avg_response_vec, time_elapsed);

                    break;
                }
            }
        } else {
            let time_elapsed = start_time.elapsed();

            if config.duration != 0 && time_elapsed >= Duration::from_secs(config.duration as u64) {
                report(config, report_avg_response_vec, time_elapsed);

                break;
            }
        }
    }
}
/// Print the final summary of the load test run.
///
/// `report_avg_response_vec` holds the per-interval responses/second
/// samples collected while measurement was active.
fn report(config: &Config, report_avg_response_vec: Vec<f64>, time_elapsed: Duration) {
    let report_len = report_avg_response_vec.len() as f64;
    let report_sum: f64 = report_avg_response_vec.into_iter().sum();

    // Guard against 0/0 = NaN when no samples were collected (e.g.
    // duration shorter than one reporting interval)
    let report_avg: f64 = if report_len > 0.0 {
        report_sum / report_len
    } else {
        0.0
    };

    println!(
        concat!(
            "\n# aquatic load test report\n\n",
            "Test ran for {} seconds.\n",
            "Average responses per second: {:.2}\n\nConfig: {:#?}\n"
        ),
        time_elapsed.as_secs(),
        report_avg,
        config
    );
}

View file

@ -0,0 +1,236 @@
use std::{
cell::RefCell,
convert::TryInto,
rc::Rc,
sync::{atomic::Ordering, Arc},
time::Duration,
};
use aquatic_ws_protocol::{InMessage, JsonValue, OfferId, OutMessage, PeerId};
use async_tungstenite::{client_async, WebSocketStream};
use futures::{SinkExt, StreamExt};
use futures_rustls::{client::TlsStream, TlsConnector};
use glommio::net::TcpStream;
use glommio::{prelude::*, timer::TimerActionRepeat};
use rand::{prelude::SmallRng, Rng, SeedableRng};
use crate::{common::LoadTestState, config::Config, utils::create_random_request};
/// Worker task: repeatedly open connections on a timer, then wait
/// forever.
///
/// The repeating timer opens a new connection at each tick until this
/// worker holds `config.num_connections_per_worker` active connections.
/// The trailing pending future keeps the task alive indefinitely, so the
/// final `Ok(())` is unreachable in practice.
pub async fn run_socket_thread(
    config: Config,
    tls_config: Arc<rustls::ClientConfig>,
    load_test_state: LoadTestState,
) -> anyhow::Result<()> {
    let config = Rc::new(config);
    // Connection count local to this worker; Rc<RefCell<..>> suffices
    // because glommio executors are single-threaded
    let num_active_connections = Rc::new(RefCell::new(0usize));

    TimerActionRepeat::repeat(move || {
        periodically_open_connections(
            config.clone(),
            tls_config.clone(),
            load_test_state.clone(),
            num_active_connections.clone(),
        )
    });

    futures::future::pending::<bool>().await;

    Ok(())
}
/// Timer callback: spawn a new connection task unless this worker has
/// already reached its connection limit.
///
/// Returns the delay until the next invocation; returning `Some` keeps
/// the repeating timer going.
async fn periodically_open_connections(
    config: Rc<Config>,
    tls_config: Arc<rustls::ClientConfig>,
    load_test_state: LoadTestState,
    num_active_connections: Rc<RefCell<usize>>,
) -> Option<Duration> {
    let next_run_in = Duration::from_millis(config.connection_creation_interval_ms);

    let below_limit = *num_active_connections.borrow() < config.num_connections_per_worker;

    if below_limit {
        let connection_task = async move {
            let result =
                Connection::run(config, tls_config, load_test_state, num_active_connections).await;

            if let Err(err) = result {
                ::log::info!("connection creation error: {:#}", err);
            }
        };

        // Detach: the connection lives independently of this timer tick
        spawn_local(connection_task).detach();
    }

    Some(next_run_in)
}
/// A single client connection to the tracker over TLS + WebSocket.
struct Connection {
    config: Rc<Config>,
    load_test_state: LoadTestState,
    rng: SmallRng,
    // Whether the next request may be sent; cleared after each send and
    // set again when a response is read
    can_send: bool,
    peer_id: PeerId,
    // Peer/offer pair from a previously received offer; the next
    // announce request is turned into an answer to it
    send_answer: Option<(PeerId, OfferId)>,
    stream: WebSocketStream<TlsStream<TcpStream>>,
}
impl Connection {
    /// Establish a TLS WebSocket connection to the tracker and run the
    /// request/response loop until it errors.
    ///
    /// The shared connection counters are incremented before the loop and
    /// decremented after it. Loop errors are logged at info level and
    /// swallowed: this function returns `Ok(())` even when the connection
    /// loop fails, so only setup errors propagate to the caller.
    async fn run(
        config: Rc<Config>,
        tls_config: Arc<rustls::ClientConfig>,
        load_test_state: LoadTestState,
        num_active_connections: Rc<RefCell<usize>>,
    ) -> anyhow::Result<()> {
        let mut rng = SmallRng::from_entropy();
        let peer_id = PeerId(rng.gen());

        let stream = TcpStream::connect(config.server_address)
            .await
            .map_err(|err| anyhow::anyhow!("connect: {:?}", err))?;
        // The server name is a placeholder: the client's certificate
        // verifier accepts any certificate regardless of name
        let stream = TlsConnector::from(tls_config)
            .connect("example.com".try_into().unwrap(), stream)
            .await?;
        let request = format!(
            "ws://{}:{}",
            config.server_address.ip(),
            config.server_address.port()
        );
        let (stream, _) = client_async(request, stream).await?;

        // Keep a handle to the statistics, since load_test_state is
        // moved into the Connection below
        let statistics = load_test_state.statistics.clone();

        let mut connection = Connection {
            config,
            load_test_state,
            rng,
            stream,
            can_send: true,
            peer_id,
            send_answer: None,
        };

        *num_active_connections.borrow_mut() += 1;
        statistics.connections.fetch_add(1, Ordering::Relaxed);

        if let Err(err) = connection.run_connection_loop().await {
            ::log::info!("connection error: {:#}", err);
        }

        *num_active_connections.borrow_mut() -= 1;
        statistics.connections.fetch_sub(1, Ordering::Relaxed);

        Ok(())
    }

    /// Alternate between sending one request and reading messages until a
    /// send or receive error ends the loop.
    ///
    /// `can_send` is cleared after each send and set again by
    /// `read_message` once a response of a known type arrives.
    async fn run_connection_loop(&mut self) -> anyhow::Result<()> {
        loop {
            if self.can_send {
                let request = create_random_request(
                    &self.config,
                    &self.load_test_state,
                    &mut self.rng,
                    self.peer_id,
                );

                // If self.send_answer is set and request is announce request, make
                // the request an offer answer
                let request = if let InMessage::AnnounceRequest(mut r) = request {
                    if let Some((peer_id, offer_id)) = self.send_answer {
                        r.to_peer_id = Some(peer_id);
                        r.offer_id = Some(offer_id);
                        // Dummy SDP payload of realistic size
                        r.answer = Some(JsonValue(::serde_json::json!(
                            {"sdp": "abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-"}
                        )));
                        r.event = None;
                        r.offers = None;
                    }

                    self.send_answer = None;

                    InMessage::AnnounceRequest(r)
                } else {
                    request
                };

                self.stream.send(request.to_ws_message()).await?;

                self.load_test_state
                    .statistics
                    .requests
                    .fetch_add(1, Ordering::Relaxed);

                self.can_send = false;
            }

            self.read_message().await?;
        }
    }

    /// Read a single WebSocket message, update the statistics counter for
    /// its response type, and (for offers) remember the peer/offer pair
    /// so the next announce request can answer it.
    ///
    /// Non-text/binary WebSocket frames are logged and skipped; messages
    /// that fail to deserialize are logged and leave `can_send` unchanged.
    async fn read_message(&mut self) -> anyhow::Result<()> {
        let message = match self
            .stream
            .next()
            .await
            .ok_or_else(|| anyhow::anyhow!("stream finished"))??
        {
            message @ tungstenite::Message::Text(_) | message @ tungstenite::Message::Binary(_) => {
                message
            }
            message => {
                ::log::warn!(
                    "Received WebSocket message of unexpected type: {:?}",
                    message
                );

                return Ok(());
            }
        };

        match OutMessage::from_ws_message(message) {
            Ok(OutMessage::Offer(offer)) => {
                self.load_test_state
                    .statistics
                    .responses_offer
                    .fetch_add(1, Ordering::Relaxed);

                // Answer this offer in the next announce request
                self.send_answer = Some((offer.peer_id, offer.offer_id));

                self.can_send = true;
            }
            Ok(OutMessage::Answer(_)) => {
                self.load_test_state
                    .statistics
                    .responses_answer
                    .fetch_add(1, Ordering::Relaxed);

                self.can_send = true;
            }
            Ok(OutMessage::AnnounceResponse(_)) => {
                self.load_test_state
                    .statistics
                    .responses_announce
                    .fetch_add(1, Ordering::Relaxed);

                self.can_send = true;
            }
            Ok(OutMessage::ScrapeResponse(_)) => {
                self.load_test_state
                    .statistics
                    .responses_scrape
                    .fetch_add(1, Ordering::Relaxed);

                self.can_send = true;
            }
            Ok(OutMessage::ErrorResponse(response)) => {
                self.load_test_state
                    .statistics
                    .responses_error
                    .fetch_add(1, Ordering::Relaxed);

                ::log::warn!("received error response: {:?}", response.failure_reason);

                self.can_send = true;
            }
            Err(err) => {
                ::log::error!("error deserializing message: {:#}", err);
            }
        }

        Ok(())
    }
}

View file

@ -0,0 +1,100 @@
use std::sync::Arc;
use rand::distributions::WeightedIndex;
use rand::prelude::*;
use rand_distr::Gamma;
use crate::common::*;
use crate::config::*;
/// Randomly pick a request type according to the configured weights and
/// build a request of that type.
pub fn create_random_request(
    config: &Config,
    state: &LoadTestState,
    rng: &mut impl Rng,
    peer_id: PeerId,
) -> InMessage {
    let weights = [
        config.torrents.weight_announce as u32,
        config.torrents.weight_scrape as u32,
    ];
    let items = [RequestType::Announce, RequestType::Scrape];

    let dist = WeightedIndex::new(&weights).expect("random request weighted index");

    let request_type = items[dist.sample(rng)];

    if request_type == RequestType::Scrape {
        create_scrape_request(config, state, rng)
    } else {
        create_announce_request(config, state, rng, peer_id)
    }
}
/// Build a random announce request for a randomly selected torrent.
///
/// With probability `peer_seeder_probability` the peer announces as a
/// completed seeder (zero bytes left); otherwise as a freshly started
/// leecher. Each request carries `offers_per_request` dummy offers.
#[inline]
fn create_announce_request(
    config: &Config,
    state: &LoadTestState,
    rng: &mut impl Rng,
    peer_id: PeerId,
) -> InMessage {
    let (event, bytes_left) = {
        if rng.gen_bool(config.torrents.peer_seeder_probability) {
            (AnnounceEvent::Completed, 0)
        } else {
            (AnnounceEvent::Started, 50)
        }
    };

    // `state` is already a reference; no need to re-borrow
    let info_hash_index = select_info_hash_index(config, state, rng);

    let mut offers = Vec::with_capacity(config.torrents.offers_per_request);

    for _ in 0..config.torrents.offers_per_request {
        offers.push(AnnounceRequestOffer {
            offer_id: OfferId(rng.gen()),
            // Dummy SDP payload of realistic size
            offer: JsonValue(::serde_json::json!(
                {"sdp": "abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-abcdefg-"}
            )),
        })
    }

    InMessage::AnnounceRequest(AnnounceRequest {
        action: AnnounceAction,
        info_hash: state.info_hashes[info_hash_index],
        peer_id,
        bytes_left: Some(bytes_left),
        event: Some(event),
        numwant: Some(offers.len()),
        offers: Some(offers),
        answer: None,
        to_peer_id: None,
        offer_id: None,
    })
}
/// Build a scrape request covering a fixed number of randomly selected
/// torrents.
#[inline]
fn create_scrape_request(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> InMessage {
    // Number of info hashes included per scrape request
    const NUM_SCRAPE_HASHES: usize = 5;

    let mut scrape_hashes = Vec::with_capacity(NUM_SCRAPE_HASHES);

    for _ in 0..NUM_SCRAPE_HASHES {
        // `state` is already a reference; no need to re-borrow
        let info_hash_index = select_info_hash_index(config, state, rng);

        scrape_hashes.push(state.info_hashes[info_hash_index]);
    }

    InMessage::ScrapeRequest(ScrapeRequest {
        action: ScrapeAction,
        info_hashes: Some(ScrapeRequestInfoHashes::Multiple(scrape_hashes)),
    })
}
/// Pick an index into `state.info_hashes` using the configured gamma
/// distribution, so some torrents are far more popular than others.
#[inline]
fn select_info_hash_index(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> usize {
    let max_index = config.torrents.number_of_torrents - 1;

    gamma_usize(rng, &state.gamma, max_index)
}
/// Map a sample from `gamma` to an index in `0..=max`.
///
/// Samples are capped at 101.0, shifted by 1 and divided by 100; samples
/// below 1.0 yield a negative fraction, which the saturating float-to-int
/// `as` cast clamps to 0, so the result is always in bounds.
///
/// Takes `&Gamma<f64>` rather than `&Arc<Gamma<f64>>` — callers passing
/// an `Arc` borrow coerce via `Deref`, and the function has no need to
/// know about the reference counting.
#[inline]
fn gamma_usize(rng: &mut impl Rng, gamma: &Gamma<f64>, max: usize) -> usize {
    let p: f64 = gamma.sample(rng);
    let p = (p.min(101.0f64) - 1.0) / 100.0;

    (p * max as f64) as usize
}