Run rustfmt, clean up aquatic_http_protocol/Cargo.toml

This commit is contained in:
Joakim Frostegård 2021-08-15 22:26:11 +02:00
parent 0cc312a78d
commit d0e716f80b
65 changed files with 1754 additions and 2590 deletions

View file

@ -1,24 +1,22 @@
use std::net::SocketAddr;
use std::sync::{Arc, atomic::AtomicUsize};
use std::sync::{atomic::AtomicUsize, Arc};
use hashbrown::HashMap;
use parking_lot::Mutex;
use serde::{Serialize, Deserialize};
use serde::{Deserialize, Serialize};
use aquatic_udp_protocol::*;
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
pub struct ThreadId(pub u8);
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
/// Server address
pub server_address: SocketAddr,
/// Number of sockets and socket worker threads
///
///
/// Sockets will bind to one port each, and with
/// multiple_client_ips = true, additionally to one IP each.
pub num_socket_workers: u8,
@ -31,14 +29,13 @@ pub struct Config {
pub handler: HandlerConfig,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct NetworkConfig {
/// True means bind to one localhost IP per socket. On macOS, this by
/// default causes all server responses to go to one socket worker.
/// Default option ("true") can cause issues on macOS.
///
///
/// The point of multiple IPs is to possibly cause a better distribution
/// of requests to servers with SO_REUSEPORT option.
pub multiple_client_ips: bool,
@ -51,7 +48,7 @@ pub struct NetworkConfig {
/// Socket worker polling event number
pub poll_event_capacity: usize,
/// Size of socket recv buffer. Use 0 for OS default.
///
///
/// This setting can have a big impact on dropped packages. It might
/// require changing system defaults. Some examples of commands to set
/// recommended values for different operating systems:
@ -67,7 +64,6 @@ pub struct NetworkConfig {
pub recv_buffer: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct HandlerConfig {
@ -89,7 +85,7 @@ pub struct HandlerConfig {
/// Handler: max microseconds to wait for single response from channel
pub channel_timeout: u64,
/// Pareto shape
///
///
/// Fake peers choose torrents according to Pareto distribution.
pub torrent_selection_pareto_shape: f64,
/// Probability that a generated peer is a seeder
@ -100,7 +96,6 @@ pub struct HandlerConfig {
pub additional_request_factor: f64,
}
impl Default for Config {
fn default() -> Self {
Self {
@ -127,7 +122,6 @@ impl Default for NetworkConfig {
}
}
impl Default for HandlerConfig {
fn default() -> Self {
Self {
@ -145,7 +139,6 @@ impl Default for HandlerConfig {
}
}
#[derive(PartialEq, Eq, Clone)]
pub struct TorrentPeer {
pub info_hash: InfoHash,
@ -155,10 +148,8 @@ pub struct TorrentPeer {
pub port: Port,
}
pub type TorrentPeerMap = HashMap<TransactionId, TorrentPeer>;
#[derive(Default)]
pub struct Statistics {
pub requests: AtomicUsize,
@ -169,7 +160,6 @@ pub struct Statistics {
pub responses_error: AtomicUsize,
}
#[derive(Clone)]
pub struct LoadTestState {
pub torrent_peers: Arc<Mutex<TorrentPeerMap>>,
@ -177,15 +167,13 @@ pub struct LoadTestState {
pub statistics: Arc<Statistics>,
}
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum RequestType {
Announce,
Connect,
Scrape
Scrape,
}
#[derive(Default)]
pub struct SocketWorkerLocalStatistics {
pub requests: usize,
@ -194,4 +182,4 @@ pub struct SocketWorkerLocalStatistics {
pub responses_announce: usize,
pub responses_scrape: usize,
pub responses_error: usize,
}
}

View file

@ -13,21 +13,18 @@ use aquatic_udp_protocol::*;
use crate::common::*;
use crate::utils::*;
pub fn run_handler_thread(
config: &Config,
state: LoadTestState,
pareto: Pareto<f64>,
request_senders: Vec<Sender<Request>>,
response_receiver: Receiver<(ThreadId, Response)>,
){
) {
let state = &state;
let mut rng1 = SmallRng::from_rng(thread_rng())
.expect("create SmallRng from thread_rng()");
let mut rng2 = SmallRng::from_rng(thread_rng())
.expect("create SmallRng from thread_rng()");
let mut rng1 = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()");
let mut rng2 = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()");
let timeout = Duration::from_micros(config.handler.channel_timeout);
let mut responses = Vec::new();
@ -40,30 +37,30 @@ pub fn run_handler_thread(
// only if ConnectionMap mutex isn't locked.
for i in 0..config.handler.max_responses_per_iter {
let response = if i == 0 {
match response_receiver.recv(){
match response_receiver.recv() {
Ok(r) => r,
Err(_) => break, // Really shouldn't happen
}
} else {
match response_receiver.recv_timeout(timeout){
match response_receiver.recv_timeout(timeout) {
Ok(r) => r,
Err(_) => {
if let Some(guard) = state.torrent_peers.try_lock(){
if let Some(guard) = state.torrent_peers.try_lock() {
opt_torrent_peers = Some(guard);
break
break;
} else {
continue
continue;
}
},
}
}
};
responses.push(response);
}
let mut torrent_peers: MutexGuard<TorrentPeerMap> = opt_torrent_peers
.unwrap_or_else(|| state.torrent_peers.lock());
let mut torrent_peers: MutexGuard<TorrentPeerMap> =
opt_torrent_peers.unwrap_or_else(|| state.torrent_peers.lock());
let requests = process_responses(
&mut rng1,
@ -71,69 +68,60 @@ pub fn run_handler_thread(
&state.info_hashes,
config,
&mut torrent_peers,
responses.drain(..)
responses.drain(..),
);
// Somewhat dubious heuristic for deciding how fast to create
// and send additional requests (requests not having anything
// to do with previously sent requests)
let num_additional_to_send = {
let num_additional_requests = requests.iter()
.map(|v| v.len())
.sum::<usize>() as f64;
let num_new_requests_per_socket = num_additional_requests /
config.num_socket_workers as f64;
let num_additional_requests = requests.iter().map(|v| v.len()).sum::<usize>() as f64;
((num_new_requests_per_socket / 1.2) * config.handler.additional_request_factor) as usize + 10
let num_new_requests_per_socket =
num_additional_requests / config.num_socket_workers as f64;
((num_new_requests_per_socket / 1.2) * config.handler.additional_request_factor)
as usize
+ 10
};
for (channel_index, new_requests) in requests.into_iter().enumerate(){
for (channel_index, new_requests) in requests.into_iter().enumerate() {
let channel = &request_senders[channel_index];
for _ in 0..num_additional_to_send {
let request = create_connect_request(
generate_transaction_id(&mut rng2)
);
let request = create_connect_request(generate_transaction_id(&mut rng2));
channel.send(request)
channel
.send(request)
.expect("send request to channel in handler worker");
}
for request in new_requests.into_iter(){
channel.send(request)
for request in new_requests.into_iter() {
channel
.send(request)
.expect("send request to channel in handler worker");
}
}
}
}
fn process_responses(
rng: &mut impl Rng,
pareto: Pareto<f64>,
info_hashes: &Arc<Vec<InfoHash>>,
config: &Config,
torrent_peers: &mut TorrentPeerMap,
responses: Drain<(ThreadId, Response)>
responses: Drain<(ThreadId, Response)>,
) -> Vec<Vec<Request>> {
let mut new_requests = Vec::with_capacity(
config.num_socket_workers as usize
);
let mut new_requests = Vec::with_capacity(config.num_socket_workers as usize);
for _ in 0..config.num_socket_workers {
new_requests.push(Vec::new());
}
for (socket_thread_id, response) in responses.into_iter() {
let opt_request = process_response(
rng,
pareto,
info_hashes,
&config,
torrent_peers,
response
);
let opt_request =
process_response(rng, pareto, info_hashes, &config, torrent_peers, response);
if let Some(new_request) = opt_request {
new_requests[socket_thread_id.0 as usize].push(new_request);
@ -143,77 +131,63 @@ fn process_responses(
new_requests
}
fn process_response(
rng: &mut impl Rng,
pareto: Pareto<f64>,
info_hashes: &Arc<Vec<InfoHash>>,
config: &Config,
torrent_peers: &mut TorrentPeerMap,
response: Response
response: Response,
) -> Option<Request> {
match response {
Response::Connect(r) => {
// Fetch the torrent peer or create it if it doesn't exist. Update
// the connection id if fetched. Create a request and move the
// torrent peer appropriately.
let torrent_peer = torrent_peers.remove(&r.transaction_id)
let torrent_peer = torrent_peers
.remove(&r.transaction_id)
.map(|mut torrent_peer| {
torrent_peer.connection_id = r.connection_id;
torrent_peer
})
.unwrap_or_else(|| {
create_torrent_peer(
config,
rng,
pareto,
info_hashes,
r.connection_id
)
create_torrent_peer(config, rng, pareto, info_hashes, r.connection_id)
});
let new_transaction_id = generate_transaction_id(rng);
let request = create_random_request(
config,
rng,
info_hashes,
new_transaction_id,
&torrent_peer
);
let request =
create_random_request(config, rng, info_hashes, new_transaction_id, &torrent_peer);
torrent_peers.insert(new_transaction_id, torrent_peer);
Some(request)
},
Response::Announce(r) => {
if_torrent_peer_move_and_create_random_request(
config,
rng,
info_hashes,
torrent_peers,
r.transaction_id
)
},
Response::Scrape(r) => {
if_torrent_peer_move_and_create_random_request(
config,
rng,
info_hashes,
torrent_peers,
r.transaction_id
)
},
}
Response::Announce(r) => if_torrent_peer_move_and_create_random_request(
config,
rng,
info_hashes,
torrent_peers,
r.transaction_id,
),
Response::Scrape(r) => if_torrent_peer_move_and_create_random_request(
config,
rng,
info_hashes,
torrent_peers,
r.transaction_id,
),
Response::Error(r) => {
if !r.message.to_lowercase().contains("connection"){
eprintln!("Received error response which didn't contain the word 'connection': {}", r.message);
if !r.message.to_lowercase().contains("connection") {
eprintln!(
"Received error response which didn't contain the word 'connection': {}",
r.message
);
}
if let Some(torrent_peer) = torrent_peers.remove(&r.transaction_id){
if let Some(torrent_peer) = torrent_peers.remove(&r.transaction_id) {
let new_transaction_id = generate_transaction_id(rng);
torrent_peers.insert(new_transaction_id, torrent_peer);
@ -226,7 +200,6 @@ fn process_response(
}
}
fn if_torrent_peer_move_and_create_random_request(
config: &Config,
rng: &mut impl Rng,
@ -234,16 +207,11 @@ fn if_torrent_peer_move_and_create_random_request(
torrent_peers: &mut TorrentPeerMap,
transaction_id: TransactionId,
) -> Option<Request> {
if let Some(torrent_peer) = torrent_peers.remove(&transaction_id){
if let Some(torrent_peer) = torrent_peers.remove(&transaction_id) {
let new_transaction_id = generate_transaction_id(rng);
let request = create_random_request(
config,
rng,
info_hashes,
new_transaction_id,
&torrent_peer
);
let request =
create_random_request(config, rng, info_hashes, new_transaction_id, &torrent_peer);
torrent_peers.insert(new_transaction_id, torrent_peer);
@ -253,18 +221,17 @@ fn if_torrent_peer_move_and_create_random_request(
}
}
fn create_random_request(
config: &Config,
rng: &mut impl Rng,
info_hashes: &Arc<Vec<InfoHash>>,
transaction_id: TransactionId,
torrent_peer: &TorrentPeer
torrent_peer: &TorrentPeer,
) -> Request {
let weights = vec![
config.handler.weight_announce as u32,
config.handler.weight_connect as u32,
config.handler.weight_scrape as u32,
config.handler.weight_connect as u32,
config.handler.weight_scrape as u32,
];
let items = vec![
@ -273,26 +240,15 @@ fn create_random_request(
RequestType::Scrape,
];
let dist = WeightedIndex::new(&weights)
.expect("random request weighted index");
let dist = WeightedIndex::new(&weights).expect("random request weighted index");
match items[dist.sample(rng)] {
RequestType::Announce => create_announce_request(
config,
rng,
torrent_peer,
transaction_id
),
RequestType::Announce => create_announce_request(config, rng, torrent_peer, transaction_id),
RequestType::Connect => create_connect_request(transaction_id),
RequestType::Scrape => create_scrape_request(
&info_hashes,
torrent_peer,
transaction_id
)
RequestType::Scrape => create_scrape_request(&info_hashes, torrent_peer, transaction_id),
}
}
fn create_announce_request(
config: &Config,
rng: &mut impl Rng,
@ -319,11 +275,11 @@ fn create_announce_request(
ip_address: None,
key: PeerKey(12345),
peers_wanted: NumberOfPeers(100),
port: torrent_peer.port
}).into()
port: torrent_peer.port,
})
.into()
}
fn create_scrape_request(
info_hashes: &Arc<Vec<InfoHash>>,
torrent_peer: &TorrentPeer,
@ -341,5 +297,6 @@ fn create_scrape_request(
connection_id: torrent_peer.connection_id,
transaction_id,
info_hashes: scape_hashes,
}).into()
}
})
.into()
}

View file

@ -1,6 +1,6 @@
use std::net::{SocketAddr, Ipv4Addr, Ipv6Addr};
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
use std::sync::{atomic::Ordering, Arc};
use std::thread;
use std::sync::{Arc, atomic::Ordering};
use std::time::{Duration, Instant};
use crossbeam_channel::unbounded;
@ -15,16 +15,14 @@ mod network;
mod utils;
use common::*;
use utils::*;
use network::*;
use handler::run_handler_thread;
use network::*;
use utils::*;
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
pub fn main(){
pub fn main() {
aquatic_cli_helpers::run_app_with_cli_and_config::<Config>(
"aquatic_udp_load_test: BitTorrent load tester",
run,
@ -32,17 +30,17 @@ pub fn main(){
)
}
impl aquatic_cli_helpers::Config for Config {}
fn run(config: Config) -> ::anyhow::Result<()> {
if config.handler.weight_announce + config.handler.weight_connect + config.handler.weight_scrape == 0 {
if config.handler.weight_announce + config.handler.weight_connect + config.handler.weight_scrape
== 0
{
panic!("Error: at least one weight must be larger than zero.");
}
println!("Starting client with config: {:#?}", config);
let mut info_hashes = Vec::with_capacity(config.handler.number_of_torrents);
for _ in 0..config.handler.number_of_torrents {
@ -55,10 +53,7 @@ fn run(config: Config) -> ::anyhow::Result<()> {
statistics: Arc::new(Statistics::default()),
};
let pareto = Pareto::new(
1.0,
config.handler.torrent_selection_pareto_shape
).unwrap();
let pareto = Pareto::new(1.0, config.handler.torrent_selection_pareto_shape).unwrap();
// Start socket workers
@ -72,7 +67,8 @@ fn run(config: Config) -> ::anyhow::Result<()> {
let port = config.network.first_port + (i as u16);
let addr = if config.network.multiple_client_ips {
let ip = if config.network.ipv6_client { // FIXME: test ipv6
let ip = if config.network.ipv6_client {
// FIXME: test ipv6
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1 + i as u16).into()
} else {
Ipv4Addr::new(127, 0, 0, 1 + i).into()
@ -95,54 +91,37 @@ fn run(config: Config) -> ::anyhow::Result<()> {
let response_sender = response_sender.clone();
let state = state.clone();
thread::spawn(move || run_socket_thread(
state,
response_sender,
receiver,
&config,
addr,
thread_id
));
thread::spawn(move || {
run_socket_thread(state, response_sender, receiver, &config, addr, thread_id)
});
}
for _ in 0..config.num_request_workers {
let config = config.clone();
let state= state.clone();
let state = state.clone();
let request_senders = request_senders.clone();
let response_receiver = response_receiver.clone();
thread::spawn(move || run_handler_thread(
&config,
state,
pareto,
request_senders,
response_receiver,
));
thread::spawn(move || {
run_handler_thread(&config, state, pareto, request_senders, response_receiver)
});
}
// Bootstrap request cycle by adding a request to each request channel
for sender in request_senders.iter(){
let request = create_connect_request(
generate_transaction_id(&mut thread_rng())
);
for sender in request_senders.iter() {
let request = create_connect_request(generate_transaction_id(&mut thread_rng()));
sender.send(request)
sender
.send(request)
.expect("bootstrap: add initial request to request queue");
}
monitor_statistics(
state,
&config
);
monitor_statistics(state, &config);
Ok(())
}
fn monitor_statistics(
state: LoadTestState,
config: &Config,
){
fn monitor_statistics(state: LoadTestState, config: &Config) {
let start_time = Instant::now();
let mut report_avg_response_vec: Vec<f64> = Vec::new();
@ -154,39 +133,46 @@ fn monitor_statistics(
let statistics = state.statistics.as_ref();
let responses_announce = statistics.responses_announce
.fetch_and(0, Ordering::SeqCst) as f64;
let response_peers = statistics.response_peers
.fetch_and(0, Ordering::SeqCst) as f64;
let responses_announce =
statistics.responses_announce.fetch_and(0, Ordering::SeqCst) as f64;
let response_peers = statistics.response_peers.fetch_and(0, Ordering::SeqCst) as f64;
let requests_per_second = statistics.requests
.fetch_and(0, Ordering::SeqCst) as f64 / interval_f64;
let responses_connect_per_second = statistics.responses_connect
.fetch_and(0, Ordering::SeqCst) as f64 / interval_f64;
let responses_scrape_per_second = statistics.responses_scrape
.fetch_and(0, Ordering::SeqCst) as f64 / interval_f64;
let responses_error_per_second = statistics.responses_error
.fetch_and(0, Ordering::SeqCst) as f64 / interval_f64;
let requests_per_second =
statistics.requests.fetch_and(0, Ordering::SeqCst) as f64 / interval_f64;
let responses_connect_per_second =
statistics.responses_connect.fetch_and(0, Ordering::SeqCst) as f64 / interval_f64;
let responses_scrape_per_second =
statistics.responses_scrape.fetch_and(0, Ordering::SeqCst) as f64 / interval_f64;
let responses_error_per_second =
statistics.responses_error.fetch_and(0, Ordering::SeqCst) as f64 / interval_f64;
let responses_announce_per_second = responses_announce / interval_f64;
let responses_announce_per_second = responses_announce / interval_f64;
let responses_per_second =
responses_connect_per_second +
responses_announce_per_second +
responses_scrape_per_second +
responses_error_per_second;
let responses_per_second = responses_connect_per_second
+ responses_announce_per_second
+ responses_scrape_per_second
+ responses_error_per_second;
report_avg_response_vec.push(responses_per_second);
println!();
println!("Requests out: {:.2}/second", requests_per_second);
println!("Responses in: {:.2}/second", responses_per_second);
println!(" - Connect responses: {:.2}", responses_connect_per_second);
println!(" - Announce responses: {:.2}", responses_announce_per_second);
println!(
" - Connect responses: {:.2}",
responses_connect_per_second
);
println!(
" - Announce responses: {:.2}",
responses_announce_per_second
);
println!(" - Scrape responses: {:.2}", responses_scrape_per_second);
println!(" - Error responses: {:.2}", responses_error_per_second);
println!("Peers per announce response: {:.2}", response_peers / responses_announce);
println!(
"Peers per announce response: {:.2}",
response_peers / responses_announce
);
let time_elapsed = start_time.elapsed();
let duration = Duration::from_secs(config.duration as u64);
@ -206,7 +192,7 @@ fn monitor_statistics(
config
);
break
break;
}
}
}

View file

@ -4,57 +4,54 @@ use std::sync::atomic::Ordering;
use std::time::Duration;
use crossbeam_channel::{Receiver, Sender};
use mio::{net::UdpSocket, Events, Poll, Interest, Token};
use socket2::{Socket, Domain, Type, Protocol};
use mio::{net::UdpSocket, Events, Interest, Poll, Token};
use socket2::{Domain, Protocol, Socket, Type};
use aquatic_udp_protocol::*;
use crate::common::*;
const MAX_PACKET_SIZE: usize = 4096;
pub fn create_socket(
config: &Config,
addr: SocketAddr
) -> ::std::net::UdpSocket {
let socket = if addr.is_ipv4(){
pub fn create_socket(config: &Config, addr: SocketAddr) -> ::std::net::UdpSocket {
let socket = if addr.is_ipv4() {
Socket::new(Domain::ipv4(), Type::dgram(), Some(Protocol::udp()))
} else {
Socket::new(Domain::ipv6(), Type::dgram(), Some(Protocol::udp()))
}.expect("create socket");
}
.expect("create socket");
socket.set_nonblocking(true)
socket
.set_nonblocking(true)
.expect("socket: set nonblocking");
if config.network.recv_buffer != 0 {
if let Err(err) = socket.set_recv_buffer_size(config.network.recv_buffer){
if let Err(err) = socket.set_recv_buffer_size(config.network.recv_buffer) {
eprintln!(
"socket: failed setting recv buffer to {}: {:?}",
config.network.recv_buffer,
err
config.network.recv_buffer, err
);
}
}
socket.bind(&addr.into())
socket
.bind(&addr.into())
.unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", addr, err));
socket.connect(&config.server_address.into())
socket
.connect(&config.server_address.into())
.expect("socket: connect to server");
socket.into_udp_socket()
}
pub fn run_socket_thread(
state: LoadTestState,
response_channel_sender: Sender<(ThreadId, Response)>,
request_receiver: Receiver<Request>,
config: &Config,
addr: SocketAddr,
thread_id: ThreadId
thread_id: ThreadId,
) {
let mut socket = UdpSocket::from_std(create_socket(config, addr));
let mut buffer = [0u8; MAX_PACKET_SIZE];
@ -78,23 +75,23 @@ pub fn run_socket_thread(
poll.poll(&mut events, Some(timeout))
.expect("failed polling");
for event in events.iter(){
if (event.token() == token) & event.is_readable(){
for event in events.iter() {
if (event.token() == token) & event.is_readable() {
read_responses(
thread_id,
&socket,
&mut buffer,
&mut local_state,
&mut responses
&mut responses,
);
for r in responses.drain(..){
response_channel_sender.send(r)
.unwrap_or_else(|err| panic!(
for r in responses.drain(..) {
response_channel_sender.send(r).unwrap_or_else(|err| {
panic!(
"add response to channel in socket worker {}: {:?}",
thread_id.0,
err
));
thread_id.0, err
)
});
}
poll.registry()
@ -107,7 +104,7 @@ pub fn run_socket_thread(
&mut socket,
&mut buffer,
&request_receiver,
&mut local_state
&mut local_state,
);
}
@ -116,40 +113,39 @@ pub fn run_socket_thread(
&mut socket,
&mut buffer,
&request_receiver,
&mut local_state
&mut local_state,
);
}
}
fn read_responses(
thread_id: ThreadId,
socket: &UdpSocket,
buffer: &mut [u8],
ls: &mut SocketWorkerLocalStatistics,
responses: &mut Vec<(ThreadId, Response)>,
){
) {
while let Ok(amt) = socket.recv(buffer) {
match Response::from_bytes(&buffer[0..amt]){
match Response::from_bytes(&buffer[0..amt]) {
Ok(response) => {
match response {
Response::Announce(ref r) => {
ls.responses_announce += 1;
ls.response_peers += r.peers.len();
},
}
Response::Scrape(_) => {
ls.responses_scrape += 1;
},
}
Response::Connect(_) => {
ls.responses_connect += 1;
},
}
Response::Error(_) => {
ls.responses_error += 1;
},
}
}
responses.push((thread_id, response))
},
}
Err(err) => {
eprintln!("Received invalid response: {:#?}", err);
}
@ -157,20 +153,19 @@ fn read_responses(
}
}
fn send_requests(
state: &LoadTestState,
socket: &mut UdpSocket,
buffer: &mut [u8],
receiver: &Receiver<Request>,
statistics: &mut SocketWorkerLocalStatistics,
){
) {
let mut cursor = Cursor::new(buffer);
while let Ok(request) = receiver.try_recv() {
cursor.set_position(0);
if let Err(err) = request.write(&mut cursor){
if let Err(err) = request.write(&mut cursor) {
eprintln!("request_to_bytes err: {}", err);
}
@ -180,25 +175,37 @@ fn send_requests(
match socket.send(&inner[..position]) {
Ok(_) => {
statistics.requests += 1;
},
}
Err(err) => {
eprintln!("Couldn't send packet: {:?}", err);
}
}
}
state.statistics.requests
state
.statistics
.requests
.fetch_add(statistics.requests, Ordering::SeqCst);
state.statistics.responses_connect
state
.statistics
.responses_connect
.fetch_add(statistics.responses_connect, Ordering::SeqCst);
state.statistics.responses_announce
state
.statistics
.responses_announce
.fetch_add(statistics.responses_announce, Ordering::SeqCst);
state.statistics.responses_scrape
state
.statistics
.responses_scrape
.fetch_add(statistics.responses_scrape, Ordering::SeqCst);
state.statistics.responses_error
state
.statistics
.responses_error
.fetch_add(statistics.responses_error, Ordering::SeqCst);
state.statistics.response_peers
state
.statistics
.response_peers
.fetch_add(statistics.response_peers, Ordering::SeqCst);
*statistics = SocketWorkerLocalStatistics::default();
}
}

View file

@ -1,30 +1,25 @@
use std::sync::Arc;
use rand_distr::Pareto;
use rand::prelude::*;
use rand_distr::Pareto;
use aquatic_udp_protocol::*;
use crate::common::*;
pub fn create_torrent_peer(
config: &Config,
rng: &mut impl Rng,
pareto: Pareto<f64>,
info_hashes: &Arc<Vec<InfoHash>>,
connection_id: ConnectionId
connection_id: ConnectionId,
) -> TorrentPeer {
let num_scape_hashes = rng.gen_range(
1..config.handler.scrape_max_torrents
);
let num_scape_hashes = rng.gen_range(1..config.handler.scrape_max_torrents);
let mut scrape_hash_indeces = Vec::new();
for _ in 0..num_scape_hashes {
scrape_hash_indeces.push(
select_info_hash_index(config, rng, pareto)
)
scrape_hash_indeces.push(select_info_hash_index(config, rng, pareto))
}
let info_hash_index = select_info_hash_index(config, rng, pareto);
@ -34,52 +29,37 @@ pub fn create_torrent_peer(
scrape_hash_indeces,
connection_id,
peer_id: generate_peer_id(),
port: Port(rand::random())
port: Port(rand::random()),
}
}
fn select_info_hash_index(
config: &Config,
rng: &mut impl Rng,
pareto: Pareto<f64>,
) -> usize {
fn select_info_hash_index(config: &Config, rng: &mut impl Rng, pareto: Pareto<f64>) -> usize {
pareto_usize(rng, pareto, config.handler.number_of_torrents - 1)
}
pub fn pareto_usize(
rng: &mut impl Rng,
pareto: Pareto<f64>,
max: usize,
) -> usize {
pub fn pareto_usize(rng: &mut impl Rng, pareto: Pareto<f64>, max: usize) -> usize {
let p: f64 = rng.sample(pareto);
let p = (p.min(101.0f64) - 1.0) / 100.0;
(p * max as f64) as usize
}
pub fn generate_peer_id() -> PeerId {
PeerId(random_20_bytes())
}
pub fn generate_info_hash() -> InfoHash {
InfoHash(random_20_bytes())
}
pub fn generate_transaction_id(rng: &mut impl Rng) -> TransactionId {
TransactionId(rng.gen())
}
pub fn create_connect_request(transaction_id: TransactionId) -> Request {
(ConnectRequest { transaction_id }).into()
}
// Don't use SmallRng here for now
fn random_20_bytes() -> [u8; 20] {
let mut bytes = [0; 20];
@ -87,4 +67,4 @@ fn random_20_bytes() -> [u8; 20] {
thread_rng().fill_bytes(&mut bytes[..]);
bytes
}
}