mirror of
https://github.com/YGGverse/aquatic.git
synced 2026-03-31 09:45:31 +00:00
Move all crates to new crates dir
This commit is contained in:
parent
3835da22ac
commit
9b032f7e24
128 changed files with 27 additions and 26 deletions
35
crates/udp_load_test/Cargo.toml
Normal file
35
crates/udp_load_test/Cargo.toml
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
# Manifest for aquatic_udp_load_test: a load-testing client for
# BitTorrent trackers speaking the UDP tracker protocol.
[package]
name = "aquatic_udp_load_test"
description = "BitTorrent (UDP) load tester"
keywords = ["udp", "benchmark", "peer-to-peer", "torrent", "bittorrent"]
# Shared metadata is inherited from the workspace root manifest.
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
rust-version.workspace = true

[features]
# Optional worker-to-CPU pinning (pulls in hwloc via aquatic_common).
cpu-pinning = ["aquatic_common/hwloc"]

[[bin]]
name = "aquatic_udp_load_test"

[dependencies]
# Sibling workspace crates
aquatic_common.workspace = true
aquatic_toml_config.workspace = true
aquatic_udp_protocol.workspace = true

anyhow = "1"
hashbrown = "0.14"
mimalloc = { version = "0.1", default-features = false }
mio = { version = "0.8", features = ["net", "os-poll"] }
rand_distr = "0.4"
rand = { version = "0.8", features = ["small_rng"] }
serde = { version = "1", features = ["derive"] }
socket2 = { version = "0.5", features = ["all"] }

[dev-dependencies]
quickcheck = "1"
quickcheck_macros = "1"
|
||||
49
crates/udp_load_test/src/common.rs
Normal file
49
crates/udp_load_test/src/common.rs
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
use std::sync::{atomic::AtomicUsize, Arc};
|
||||
|
||||
use hashbrown::HashMap;
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
/// State for one simulated peer, stored in a [`TorrentPeerMap`] keyed by the
/// transaction id of its most recently sent request.
#[derive(PartialEq, Eq, Clone)]
pub struct TorrentPeer {
    /// The torrent this peer announces for
    pub info_hash: InfoHash,
    /// Indices into the shared info hash list, used when building this
    /// peer's scrape requests
    pub scrape_hash_indeces: Vec<usize>,
    /// Connection id most recently received from the tracker
    pub connection_id: ConnectionId,
    pub peer_id: PeerId,
    pub port: Port,
}
|
||||
|
||||
/// Torrent peers keyed by the transaction id of their last outgoing request.
pub type TorrentPeerMap = HashMap<TransactionId, TorrentPeer>;
|
||||
|
||||
/// Shared counters, incremented by socket workers and periodically drained
/// (read-and-reset) by the statistics monitor.
#[derive(Default)]
pub struct Statistics {
    /// Requests sent
    pub requests: AtomicUsize,
    /// Total peers received across all announce responses
    pub response_peers: AtomicUsize,
    pub responses_connect: AtomicUsize,
    pub responses_announce: AtomicUsize,
    pub responses_scrape: AtomicUsize,
    pub responses_error: AtomicUsize,
}
|
||||
|
||||
/// State shared between all worker threads and the statistics monitor.
#[derive(Clone)]
pub struct LoadTestState {
    /// All info hashes simulated peers may announce or scrape for
    pub info_hashes: Arc<Vec<InfoHash>>,
    /// Shared counters updated by workers
    pub statistics: Arc<Statistics>,
}
|
||||
|
||||
/// Kind of tracker request to generate next, chosen according to the
/// configured request weights.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum RequestType {
    Announce,
    Connect,
    Scrape,
}
|
||||
|
||||
/// Per-worker counters, accumulated locally and periodically flushed into
/// the shared atomic [`Statistics`] to avoid contention on every packet.
#[derive(Default)]
pub struct SocketWorkerLocalStatistics {
    pub requests: usize,
    pub response_peers: usize,
    pub responses_connect: usize,
    pub responses_announce: usize,
    pub responses_scrape: usize,
    pub responses_error: usize,
}
|
||||
136
crates/udp_load_test/src/config.rs
Normal file
136
crates/udp_load_test/src/config.rs
Normal file
|
|
@ -0,0 +1,136 @@
|
|||
use std::net::SocketAddr;
|
||||
|
||||
use serde::Deserialize;
|
||||
|
||||
use aquatic_common::cli::LogLevel;
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
use aquatic_common::cpu_pinning::desc::CpuPinningConfigDesc;
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
|
||||
/// aquatic_udp_load_test configuration
// NOTE: doc comments on fields are deliberately left as-is where present,
// since the TomlConfig derive may include them in generated config files.
// New annotations below use plain `//` comments for that reason.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    /// Server address
    ///
    /// If you want to send IPv4 requests to a IPv4+IPv6 tracker, put an IPv4
    /// address here.
    pub server_address: SocketAddr,
    pub log_level: LogLevel,
    /// Number of workers sending requests
    pub workers: u8,
    /// Run duration (quit and generate report after this many seconds)
    pub duration: usize,
    // Client socket settings
    pub network: NetworkConfig,
    // Request mix and torrent/peer simulation settings
    pub requests: RequestConfig,
    // Only available when compiled with the "cpu-pinning" feature
    #[cfg(feature = "cpu-pinning")]
    pub cpu_pinning: CpuPinningConfigDesc,
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
server_address: "127.0.0.1:3000".parse().unwrap(),
|
||||
log_level: LogLevel::Error,
|
||||
workers: 1,
|
||||
duration: 0,
|
||||
network: NetworkConfig::default(),
|
||||
requests: RequestConfig::default(),
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
cpu_pinning: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Client socket settings. Field doc comments are kept verbatim since the
// TomlConfig derive may emit them into generated config files.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct NetworkConfig {
    /// True means bind to one localhost IP per socket.
    ///
    /// The point of multiple IPs is to cause a better distribution
    /// of requests to servers with SO_REUSEPORT option.
    ///
    /// Setting this to true can cause issues on macOS.
    pub multiple_client_ipv4s: bool,
    /// Number of first client port
    pub first_port: u16,
    /// Socket worker poll timeout in microseconds
    pub poll_timeout: u64,
    /// Socket worker polling event number
    pub poll_event_capacity: usize,
    /// Size of socket recv buffer. Use 0 for OS default.
    ///
    /// This setting can have a big impact on dropped packages. It might
    /// require changing system defaults. Some examples of commands to set
    /// values for different operating systems:
    ///
    /// macOS:
    /// $ sudo sysctl net.inet.udp.recvspace=6000000
    ///
    /// Linux:
    /// $ sudo sysctl -w net.core.rmem_max=104857600
    /// $ sudo sysctl -w net.core.rmem_default=104857600
    pub recv_buffer: usize,
}
|
||||
|
||||
impl Default for NetworkConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
multiple_client_ipv4s: true,
|
||||
first_port: 45_000,
|
||||
poll_timeout: 276,
|
||||
poll_event_capacity: 2_877,
|
||||
recv_buffer: 6_000_000,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Request mix and torrent/peer simulation settings.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct RequestConfig {
    /// Number of torrents to simulate
    pub number_of_torrents: usize,
    /// Maximum number of torrents to ask about in scrape requests
    pub scrape_max_torrents: usize,
    /// Probability that a generated request is a connect request as part
    /// of sum of the various weight arguments.
    pub weight_connect: usize,
    /// Probability that a generated request is an announce request, as part
    /// of sum of the various weight arguments.
    pub weight_announce: usize,
    /// Probability that a generated request is a scrape request, as part
    /// of sum of the various weight arguments.
    pub weight_scrape: usize,
    /// Peers choose torrents according to this Gamma distribution shape
    pub torrent_gamma_shape: f64,
    /// Peers choose torrents according to this Gamma distribution scale
    pub torrent_gamma_scale: f64,
    /// Probability that a generated peer is a seeder
    pub peer_seeder_probability: f64,
    /// Probability that an additional connect request will be sent for each
    /// mio event
    pub additional_request_probability: f32,
}
|
||||
|
||||
impl Default for RequestConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
number_of_torrents: 10_000,
|
||||
scrape_max_torrents: 50,
|
||||
weight_connect: 0,
|
||||
weight_announce: 100,
|
||||
weight_scrape: 1,
|
||||
torrent_gamma_shape: 0.2,
|
||||
torrent_gamma_scale: 100.0,
|
||||
peer_seeder_probability: 0.25,
|
||||
additional_request_probability: 0.5,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::Config;

    // Round-trip test: the default config serialized to TOML and
    // deserialized back must compare equal.
    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}
|
||||
203
crates/udp_load_test/src/main.rs
Normal file
203
crates/udp_load_test/src/main.rs
Normal file
|
|
@ -0,0 +1,203 @@
|
|||
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::{atomic::Ordering, Arc};
|
||||
use std::thread::{self, Builder};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex};
|
||||
use rand_distr::Gamma;
|
||||
|
||||
mod common;
|
||||
mod config;
|
||||
mod utils;
|
||||
mod worker;
|
||||
|
||||
use common::*;
|
||||
use config::Config;
|
||||
use utils::*;
|
||||
use worker::*;
|
||||
|
||||
// Use mimalloc as the global allocator (built without default features).
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
pub fn main() {
|
||||
aquatic_common::cli::run_app_with_cli_and_config::<Config>(
|
||||
"aquatic_udp_load_test: BitTorrent load tester",
|
||||
env!("CARGO_PKG_VERSION"),
|
||||
run,
|
||||
None,
|
||||
)
|
||||
}
|
||||
|
||||
impl aquatic_common::cli::Config for Config {
    // Expose the configured log level so the shared CLI runner can set up
    // logging accordingly.
    fn get_log_level(&self) -> Option<aquatic_common::cli::LogLevel> {
        let log_level = self.log_level;

        Some(log_level)
    }
}
|
||||
|
||||
fn run(config: Config) -> ::anyhow::Result<()> {
|
||||
if config.requests.weight_announce
|
||||
+ config.requests.weight_connect
|
||||
+ config.requests.weight_scrape
|
||||
== 0
|
||||
{
|
||||
panic!("Error: at least one weight must be larger than zero.");
|
||||
}
|
||||
|
||||
println!("Starting client with config: {:#?}", config);
|
||||
|
||||
let mut info_hashes = Vec::with_capacity(config.requests.number_of_torrents);
|
||||
|
||||
for _ in 0..config.requests.number_of_torrents {
|
||||
info_hashes.push(generate_info_hash());
|
||||
}
|
||||
|
||||
let state = LoadTestState {
|
||||
info_hashes: Arc::new(info_hashes),
|
||||
statistics: Arc::new(Statistics::default()),
|
||||
};
|
||||
|
||||
let gamma = Gamma::new(
|
||||
config.requests.torrent_gamma_shape,
|
||||
config.requests.torrent_gamma_scale,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
// Start workers
|
||||
|
||||
for i in 0..config.workers {
|
||||
let port = config.network.first_port + (i as u16);
|
||||
|
||||
let ip = if config.server_address.is_ipv6() {
|
||||
Ipv6Addr::LOCALHOST.into()
|
||||
} else {
|
||||
if config.network.multiple_client_ipv4s {
|
||||
Ipv4Addr::new(127, 0, 0, 1 + i).into()
|
||||
} else {
|
||||
Ipv4Addr::LOCALHOST.into()
|
||||
}
|
||||
};
|
||||
|
||||
let addr = SocketAddr::new(ip, port);
|
||||
let config = config.clone();
|
||||
let state = state.clone();
|
||||
|
||||
Builder::new().name("load-test".into()).spawn(move || {
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
pin_current_if_configured_to(
|
||||
&config.cpu_pinning,
|
||||
config.workers as usize,
|
||||
0,
|
||||
WorkerIndex::SocketWorker(i as usize),
|
||||
);
|
||||
|
||||
run_worker_thread(state, gamma, &config, addr)
|
||||
})?;
|
||||
}
|
||||
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
pin_current_if_configured_to(
|
||||
&config.cpu_pinning,
|
||||
config.workers as usize,
|
||||
0,
|
||||
WorkerIndex::Util,
|
||||
);
|
||||
|
||||
monitor_statistics(state, &config);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn monitor_statistics(state: LoadTestState, config: &Config) {
|
||||
let mut report_avg_connect: Vec<f64> = Vec::new();
|
||||
let mut report_avg_announce: Vec<f64> = Vec::new();
|
||||
let mut report_avg_scrape: Vec<f64> = Vec::new();
|
||||
let mut report_avg_error: Vec<f64> = Vec::new();
|
||||
|
||||
let interval = 5;
|
||||
|
||||
let start_time = Instant::now();
|
||||
let duration = Duration::from_secs(config.duration as u64);
|
||||
|
||||
let mut last = start_time;
|
||||
|
||||
let time_elapsed = loop {
|
||||
thread::sleep(Duration::from_secs(interval));
|
||||
|
||||
let requests = fetch_and_reset(&state.statistics.requests);
|
||||
let response_peers = fetch_and_reset(&state.statistics.response_peers);
|
||||
let responses_connect = fetch_and_reset(&state.statistics.responses_connect);
|
||||
let responses_announce = fetch_and_reset(&state.statistics.responses_announce);
|
||||
let responses_scrape = fetch_and_reset(&state.statistics.responses_scrape);
|
||||
let responses_error = fetch_and_reset(&state.statistics.responses_error);
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
let elapsed = (now - last).as_secs_f64();
|
||||
|
||||
last = now;
|
||||
|
||||
let peers_per_announce_response = response_peers / responses_announce;
|
||||
|
||||
let avg_requests = requests / elapsed;
|
||||
let avg_responses_connect = responses_connect / elapsed;
|
||||
let avg_responses_announce = responses_announce / elapsed;
|
||||
let avg_responses_scrape = responses_scrape / elapsed;
|
||||
let avg_responses_error = responses_error / elapsed;
|
||||
|
||||
let avg_responses = avg_responses_connect
|
||||
+ avg_responses_announce
|
||||
+ avg_responses_scrape
|
||||
+ avg_responses_error;
|
||||
|
||||
report_avg_connect.push(avg_responses_connect);
|
||||
report_avg_announce.push(avg_responses_announce);
|
||||
report_avg_scrape.push(avg_responses_scrape);
|
||||
report_avg_error.push(avg_responses_error);
|
||||
|
||||
println!();
|
||||
println!("Requests out: {:.2}/second", avg_requests);
|
||||
println!("Responses in: {:.2}/second", avg_responses);
|
||||
println!(" - Connect responses: {:.2}", avg_responses_connect);
|
||||
println!(" - Announce responses: {:.2}", avg_responses_announce);
|
||||
println!(" - Scrape responses: {:.2}", avg_responses_scrape);
|
||||
println!(" - Error responses: {:.2}", avg_responses_error);
|
||||
println!(
|
||||
"Peers per announce response: {:.2}",
|
||||
peers_per_announce_response
|
||||
);
|
||||
|
||||
let time_elapsed = start_time.elapsed();
|
||||
|
||||
if config.duration != 0 && time_elapsed >= duration {
|
||||
break time_elapsed;
|
||||
}
|
||||
};
|
||||
|
||||
let len = report_avg_connect.len() as f64;
|
||||
|
||||
let avg_connect: f64 = report_avg_connect.into_iter().sum::<f64>() / len;
|
||||
let avg_announce: f64 = report_avg_announce.into_iter().sum::<f64>() / len;
|
||||
let avg_scrape: f64 = report_avg_scrape.into_iter().sum::<f64>() / len;
|
||||
let avg_error: f64 = report_avg_error.into_iter().sum::<f64>() / len;
|
||||
|
||||
let avg_total = avg_connect + avg_announce + avg_scrape + avg_error;
|
||||
|
||||
println!();
|
||||
println!("# aquatic load test report");
|
||||
println!();
|
||||
println!("Test ran for {} seconds", time_elapsed.as_secs());
|
||||
println!("Average responses per second: {:.2}", avg_total);
|
||||
println!(" - Connect responses: {:.2}", avg_connect);
|
||||
println!(" - Announce responses: {:.2}", avg_announce);
|
||||
println!(" - Scrape responses: {:.2}", avg_scrape);
|
||||
println!(" - Error responses: {:.2}", avg_error);
|
||||
println!();
|
||||
println!("Config: {:#?}", config);
|
||||
println!();
|
||||
}
|
||||
|
||||
/// Atomically take the counter's current value, resetting it to zero, and
/// return the previous value as a float for rate calculations.
fn fetch_and_reset(atomic_usize: &AtomicUsize) -> f64 {
    // `swap(0, ...)` states "take and reset" directly; the original's
    // `fetch_and(0, ...)` is equivalent but obscures the intent
    atomic_usize.swap(0, Ordering::Relaxed) as f64
}
|
||||
36
crates/udp_load_test/src/utils.rs
Normal file
36
crates/udp_load_test/src/utils.rs
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
use rand::prelude::*;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
/// Map a sample from `gamma` to an index in `0..=max`.
///
/// The sample is clamped to at most 101.0, shifted down by 1 and divided by
/// 100, so samples in [1, 101] map to fractions in [0, 1] of `max`. Samples
/// below 1.0 give a negative fraction; the `as usize` cast saturates that
/// to 0 (defined behavior for float-to-int casts).
pub fn gamma_usize(rng: &mut impl Rng, gamma: Gamma<f64>, max: usize) -> usize {
    let p: f64 = rng.sample(gamma);
    let p = (p.min(101.0f64) - 1.0) / 100.0;

    (p * max as f64) as usize
}
|
||||
|
||||
/// Generate a random 20-byte peer id.
pub fn generate_peer_id() -> PeerId {
    PeerId(random_20_bytes())
}
|
||||
|
||||
/// Generate a random 20-byte info hash.
pub fn generate_info_hash() -> InfoHash {
    InfoHash(random_20_bytes())
}
|
||||
|
||||
/// Generate a random transaction id from the given RNG.
pub fn generate_transaction_id(rng: &mut impl Rng) -> TransactionId {
    TransactionId(rng.gen())
}
|
||||
|
||||
/// Build a connect request carrying the given transaction id.
pub fn create_connect_request(transaction_id: TransactionId) -> Request {
    (ConnectRequest { transaction_id }).into()
}
|
||||
|
||||
// Don't use SmallRng here for now
/// Fill a 20-byte array (the size of both info hashes and peer ids) with
/// random bytes from the thread-local RNG.
fn random_20_bytes() -> [u8; 20] {
    let mut bytes = [0; 20];

    thread_rng().fill_bytes(&mut bytes[..]);

    bytes
}
|
||||
205
crates/udp_load_test/src/worker/mod.rs
Normal file
205
crates/udp_load_test/src/worker/mod.rs
Normal file
|
|
@ -0,0 +1,205 @@
|
|||
mod request_gen;
|
||||
|
||||
use std::io::Cursor;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::Duration;
|
||||
|
||||
use mio::{net::UdpSocket, Events, Interest, Poll, Token};
|
||||
use rand::Rng;
|
||||
use rand::{prelude::SmallRng, thread_rng, SeedableRng};
|
||||
use rand_distr::Gamma;
|
||||
use socket2::{Domain, Protocol, Socket, Type};
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::{common::*, utils::*};
|
||||
use request_gen::process_response;
|
||||
|
||||
// Receive buffer size for incoming response packets
const MAX_PACKET_SIZE: usize = 8192;
|
||||
|
||||
/// Body of one socket worker thread: own a UDP socket bound to `addr`,
/// send requests, and on each readable mio event drain all pending
/// responses, generating a follow-up request per response.
///
/// Runs forever; statistics are flushed to `state` once per readable event.
pub fn run_worker_thread(
    state: LoadTestState,
    gamma: Gamma<f64>,
    config: &Config,
    addr: SocketAddr,
) {
    let mut socket = UdpSocket::from_std(create_socket(config, addr));
    let mut buffer = [0u8; MAX_PACKET_SIZE];

    // SmallRng is fast and fine here: request randomness has no security
    // requirements
    let mut rng = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()");
    let mut torrent_peers = TorrentPeerMap::default();

    let token = Token(0);
    let interests = Interest::READABLE;
    let timeout = Duration::from_micros(config.network.poll_timeout);

    let mut poll = Poll::new().expect("create poll");

    poll.registry()
        .register(&mut socket, token, interests)
        .unwrap();

    let mut events = Events::with_capacity(config.network.poll_event_capacity);

    let mut statistics = SocketWorkerLocalStatistics::default();

    // Bootstrap request cycle
    let initial_request = create_connect_request(generate_transaction_id(&mut thread_rng()));
    send_request(&mut socket, &mut buffer, &mut statistics, initial_request);

    loop {
        poll.poll(&mut events, Some(timeout))
            .expect("failed polling");

        for event in events.iter() {
            if (event.token() == token) & event.is_readable() {
                // The socket is non-blocking: drain it until recv errors
                // (typically WouldBlock)
                while let Ok(amt) = socket.recv(&mut buffer) {
                    match Response::from_bytes(&buffer[0..amt], addr.is_ipv4()) {
                        Ok(response) => {
                            // Count the response by kind before handing it
                            // to the request generator
                            match response {
                                Response::AnnounceIpv4(ref r) => {
                                    statistics.responses_announce += 1;
                                    statistics.response_peers += r.peers.len();
                                }
                                Response::AnnounceIpv6(ref r) => {
                                    statistics.responses_announce += 1;
                                    statistics.response_peers += r.peers.len();
                                }
                                Response::Scrape(_) => {
                                    statistics.responses_scrape += 1;
                                }
                                Response::Connect(_) => {
                                    statistics.responses_connect += 1;
                                }
                                Response::Error(_) => {
                                    statistics.responses_error += 1;
                                }
                            }

                            let opt_request = process_response(
                                &mut rng,
                                gamma,
                                &state.info_hashes,
                                &config,
                                &mut torrent_peers,
                                response,
                            );

                            if let Some(request) = opt_request {
                                send_request(&mut socket, &mut buffer, &mut statistics, request);
                            }
                        }
                        Err(err) => {
                            eprintln!("Received invalid response: {:#?}", err);
                        }
                    }
                }

                // Occasionally inject an extra connect request to keep the
                // request cycle from dying out
                if rng.gen::<f32>() <= config.requests.additional_request_probability {
                    let additional_request =
                        create_connect_request(generate_transaction_id(&mut rng));

                    send_request(
                        &mut socket,
                        &mut buffer,
                        &mut statistics,
                        additional_request,
                    );
                }

                update_shared_statistics(&state, &mut statistics);
            }
        }
    }
}
|
||||
|
||||
fn send_request(
|
||||
socket: &mut UdpSocket,
|
||||
buffer: &mut [u8],
|
||||
statistics: &mut SocketWorkerLocalStatistics,
|
||||
request: Request,
|
||||
) {
|
||||
let mut cursor = Cursor::new(buffer);
|
||||
|
||||
match request.write(&mut cursor) {
|
||||
Ok(()) => {
|
||||
let position = cursor.position() as usize;
|
||||
let inner = cursor.get_ref();
|
||||
|
||||
match socket.send(&inner[..position]) {
|
||||
Ok(_) => {
|
||||
statistics.requests += 1;
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("Couldn't send packet: {:?}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("request_to_bytes err: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_shared_statistics(state: &LoadTestState, statistics: &mut SocketWorkerLocalStatistics) {
|
||||
state
|
||||
.statistics
|
||||
.requests
|
||||
.fetch_add(statistics.requests, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.responses_connect
|
||||
.fetch_add(statistics.responses_connect, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.responses_announce
|
||||
.fetch_add(statistics.responses_announce, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.responses_scrape
|
||||
.fetch_add(statistics.responses_scrape, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.responses_error
|
||||
.fetch_add(statistics.responses_error, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.response_peers
|
||||
.fetch_add(statistics.response_peers, Ordering::Relaxed);
|
||||
|
||||
*statistics = SocketWorkerLocalStatistics::default();
|
||||
}
|
||||
|
||||
fn create_socket(config: &Config, addr: SocketAddr) -> ::std::net::UdpSocket {
|
||||
let socket = if addr.is_ipv4() {
|
||||
Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))
|
||||
} else {
|
||||
Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))
|
||||
}
|
||||
.expect("create socket");
|
||||
|
||||
socket
|
||||
.set_nonblocking(true)
|
||||
.expect("socket: set nonblocking");
|
||||
|
||||
if config.network.recv_buffer != 0 {
|
||||
if let Err(err) = socket.set_recv_buffer_size(config.network.recv_buffer) {
|
||||
eprintln!(
|
||||
"socket: failed setting recv buffer to {}: {:?}",
|
||||
config.network.recv_buffer, err
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
socket
|
||||
.bind(&addr.into())
|
||||
.unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", addr, err));
|
||||
|
||||
socket
|
||||
.connect(&config.server_address.into())
|
||||
.expect("socket: connect to server");
|
||||
|
||||
socket.into()
|
||||
}
|
||||
218
crates/udp_load_test/src/worker/request_gen.rs
Normal file
218
crates/udp_load_test/src/worker/request_gen.rs
Normal file
|
|
@ -0,0 +1,218 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use rand::distributions::WeightedIndex;
|
||||
use rand::prelude::*;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
use crate::utils::*;
|
||||
|
||||
/// Given a tracker response, update the torrent peer map and create the
/// follow-up request to send, if any.
///
/// Returns `None` only when a non-error response arrives for a transaction
/// id with no registered torrent peer.
pub fn process_response(
    rng: &mut impl Rng,
    gamma: Gamma<f64>,
    info_hashes: &Arc<Vec<InfoHash>>,
    config: &Config,
    torrent_peers: &mut TorrentPeerMap,
    response: Response,
) -> Option<Request> {
    match response {
        Response::Connect(r) => {
            // Fetch the torrent peer or create it if it doesn't exist.
            // Update the connection id if fetched. Create a request and
            // move the torrent peer appropriately.

            let torrent_peer = torrent_peers
                .remove(&r.transaction_id)
                .map(|mut torrent_peer| {
                    torrent_peer.connection_id = r.connection_id;

                    torrent_peer
                })
                .unwrap_or_else(|| {
                    create_torrent_peer(config, rng, gamma, info_hashes, r.connection_id)
                });

            let new_transaction_id = generate_transaction_id(rng);

            let request =
                create_random_request(config, rng, info_hashes, new_transaction_id, &torrent_peer);

            // Re-register the peer under the new request's transaction id
            torrent_peers.insert(new_transaction_id, torrent_peer);

            Some(request)
        }
        Response::AnnounceIpv4(r) => if_torrent_peer_move_and_create_random_request(
            config,
            rng,
            info_hashes,
            torrent_peers,
            r.transaction_id,
        ),
        Response::AnnounceIpv6(r) => if_torrent_peer_move_and_create_random_request(
            config,
            rng,
            info_hashes,
            torrent_peers,
            r.transaction_id,
        ),
        Response::Scrape(r) => if_torrent_peer_move_and_create_random_request(
            config,
            rng,
            info_hashes,
            torrent_peers,
            r.transaction_id,
        ),
        Response::Error(r) => {
            // Connection-related errors are expected (connection ids
            // expire); anything else is surprising and worth logging
            if !r.message.to_lowercase().contains("connection") {
                eprintln!(
                    "Received error response which didn't contain the word 'connection': {}",
                    r.message
                );
            }

            // Either way, restart the cycle with a fresh connect request,
            // keeping the peer (under a new transaction id) if it exists
            if let Some(torrent_peer) = torrent_peers.remove(&r.transaction_id) {
                let new_transaction_id = generate_transaction_id(rng);

                torrent_peers.insert(new_transaction_id, torrent_peer);

                Some(create_connect_request(new_transaction_id))
            } else {
                Some(create_connect_request(generate_transaction_id(rng)))
            }
        }
    }
}
|
||||
|
||||
fn if_torrent_peer_move_and_create_random_request(
|
||||
config: &Config,
|
||||
rng: &mut impl Rng,
|
||||
info_hashes: &Arc<Vec<InfoHash>>,
|
||||
torrent_peers: &mut TorrentPeerMap,
|
||||
transaction_id: TransactionId,
|
||||
) -> Option<Request> {
|
||||
if let Some(torrent_peer) = torrent_peers.remove(&transaction_id) {
|
||||
let new_transaction_id = generate_transaction_id(rng);
|
||||
|
||||
let request =
|
||||
create_random_request(config, rng, info_hashes, new_transaction_id, &torrent_peer);
|
||||
|
||||
torrent_peers.insert(new_transaction_id, torrent_peer);
|
||||
|
||||
Some(request)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn create_random_request(
|
||||
config: &Config,
|
||||
rng: &mut impl Rng,
|
||||
info_hashes: &Arc<Vec<InfoHash>>,
|
||||
transaction_id: TransactionId,
|
||||
torrent_peer: &TorrentPeer,
|
||||
) -> Request {
|
||||
const ITEMS: [RequestType; 3] = [
|
||||
RequestType::Announce,
|
||||
RequestType::Connect,
|
||||
RequestType::Scrape,
|
||||
];
|
||||
|
||||
let weights = [
|
||||
config.requests.weight_announce as u32,
|
||||
config.requests.weight_connect as u32,
|
||||
config.requests.weight_scrape as u32,
|
||||
];
|
||||
|
||||
let dist = WeightedIndex::new(weights).expect("random request weighted index");
|
||||
|
||||
match ITEMS[dist.sample(rng)] {
|
||||
RequestType::Announce => create_announce_request(config, rng, torrent_peer, transaction_id),
|
||||
RequestType::Connect => create_connect_request(transaction_id),
|
||||
RequestType::Scrape => create_scrape_request(&info_hashes, torrent_peer, transaction_id),
|
||||
}
|
||||
}
|
||||
|
||||
fn create_announce_request(
|
||||
config: &Config,
|
||||
rng: &mut impl Rng,
|
||||
torrent_peer: &TorrentPeer,
|
||||
transaction_id: TransactionId,
|
||||
) -> Request {
|
||||
let (event, bytes_left) = {
|
||||
if rng.gen_bool(config.requests.peer_seeder_probability) {
|
||||
(AnnounceEvent::Completed, NumberOfBytes(0))
|
||||
} else {
|
||||
(AnnounceEvent::Started, NumberOfBytes(50))
|
||||
}
|
||||
};
|
||||
|
||||
(AnnounceRequest {
|
||||
connection_id: torrent_peer.connection_id,
|
||||
transaction_id,
|
||||
info_hash: torrent_peer.info_hash,
|
||||
peer_id: torrent_peer.peer_id,
|
||||
bytes_downloaded: NumberOfBytes(50),
|
||||
bytes_uploaded: NumberOfBytes(50),
|
||||
bytes_left,
|
||||
event,
|
||||
ip_address: None,
|
||||
key: PeerKey(12345),
|
||||
peers_wanted: NumberOfPeers(100),
|
||||
port: torrent_peer.port,
|
||||
})
|
||||
.into()
|
||||
}
|
||||
|
||||
fn create_scrape_request(
|
||||
info_hashes: &Arc<Vec<InfoHash>>,
|
||||
torrent_peer: &TorrentPeer,
|
||||
transaction_id: TransactionId,
|
||||
) -> Request {
|
||||
let indeces = &torrent_peer.scrape_hash_indeces;
|
||||
|
||||
let mut scape_hashes = Vec::with_capacity(indeces.len());
|
||||
|
||||
for i in indeces {
|
||||
scape_hashes.push(info_hashes[*i].to_owned())
|
||||
}
|
||||
|
||||
(ScrapeRequest {
|
||||
connection_id: torrent_peer.connection_id,
|
||||
transaction_id,
|
||||
info_hashes: scape_hashes,
|
||||
})
|
||||
.into()
|
||||
}
|
||||
|
||||
fn create_torrent_peer(
|
||||
config: &Config,
|
||||
rng: &mut impl Rng,
|
||||
gamma: Gamma<f64>,
|
||||
info_hashes: &Arc<Vec<InfoHash>>,
|
||||
connection_id: ConnectionId,
|
||||
) -> TorrentPeer {
|
||||
let num_scape_hashes = rng.gen_range(1..config.requests.scrape_max_torrents);
|
||||
|
||||
let mut scrape_hash_indeces = Vec::new();
|
||||
|
||||
for _ in 0..num_scape_hashes {
|
||||
scrape_hash_indeces.push(select_info_hash_index(config, rng, gamma))
|
||||
}
|
||||
|
||||
let info_hash_index = select_info_hash_index(config, rng, gamma);
|
||||
|
||||
TorrentPeer {
|
||||
info_hash: info_hashes[info_hash_index],
|
||||
scrape_hash_indeces,
|
||||
connection_id,
|
||||
peer_id: generate_peer_id(),
|
||||
port: Port(rng.gen()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Pick an index into the shared info hash list using the configured Gamma
/// distribution, so some torrents are much more popular than others.
fn select_info_hash_index(config: &Config, rng: &mut impl Rng, gamma: Gamma<f64>) -> usize {
    gamma_usize(rng, gamma, config.requests.number_of_torrents - 1)
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue