http load test and protocol: cut down on allocations a bit

This commit is contained in:
Joakim Frostegård 2020-07-20 17:23:40 +02:00
parent 47c71376e2
commit 45940a05a9
4 changed files with 70 additions and 19 deletions

View file

@ -10,8 +10,9 @@
## aquatic_http_load_test
* request sending: too many allocations, likely due to request-to-byte
conversion. Optimize!
* 80k responses per second are possible (as long as they aren't errors).
This probably has to do with the exact timing of receiving responses and
sending requests.
* think about when to open new connections
* requests per seconds only goes up with lower poll timeout in tracker or
more connections (if they don't all send stuff at the same time), in my

View file

@ -13,12 +13,12 @@ pub fn create_random_request(
state: &LoadTestState,
rng: &mut impl Rng,
) -> Request {
let weights = vec![
let weights = [
config.torrents.weight_announce as u32,
config.torrents.weight_scrape as u32,
];
let items = vec![
let items = [
RequestType::Announce,
RequestType::Scrape,
];
@ -41,6 +41,7 @@ pub fn create_random_request(
}
#[inline]
fn create_announce_request(
config: &Config,
state: &LoadTestState,
@ -69,14 +70,15 @@ fn create_announce_request(
}
#[inline]
fn create_scrape_request(
config: &Config,
state: &LoadTestState,
rng: &mut impl Rng,
) -> Request {
let mut scrape_hashes = Vec::new();
let mut scrape_hashes = Vec::with_capacity(5);
for _ in 0..20 {
for _ in 0..5 {
let info_hash_index = select_info_hash_index(config, &state, rng);
scrape_hashes.push(state.info_hashes[info_hash_index]);
@ -88,6 +90,7 @@ fn create_scrape_request(
}
#[inline]
fn select_info_hash_index(
config: &Config,
state: &LoadTestState,
@ -97,6 +100,7 @@ fn select_info_hash_index(
}
#[inline]
fn pareto_usize(
rng: &mut impl Rng,
pareto: &Arc<Pareto<f64>>,

View file

@ -22,13 +22,28 @@ pub struct AnnounceRequest {
impl AnnounceRequest {
pub fn as_bytes(&self) -> Vec<u8> {
let mut bytes = Vec::new();
let mut bytes = Vec::with_capacity(
24 +
60 +
9 +
60 +
6 +
5 + // high estimate
6 +
2 + // estimate
14 + // FIXME event
9 +
1 +
20 + // numwant bad estimate
20 + // key bad estimate
13
);
bytes.extend_from_slice(b"GET /announce?info_hash=");
bytes.extend_from_slice(&urlencode_20_bytes(self.info_hash.0));
urlencode_20_bytes(self.info_hash.0, &mut bytes);
bytes.extend_from_slice(b"&peer_id=");
bytes.extend_from_slice(&urlencode_20_bytes(self.peer_id.0));
urlencode_20_bytes(self.info_hash.0, &mut bytes);
bytes.extend_from_slice(b"&port=");
let _ = itoa::write(&mut bytes, self.port);

View file

@ -1,4 +1,5 @@
use std::net::{Ipv4Addr, Ipv6Addr};
use std::io::Write;
use serde::Serializer;
use smartstring::{SmartString, LazyCompact};
@ -6,19 +7,15 @@ use smartstring::{SmartString, LazyCompact};
use super::response::ResponsePeer;
/// Percent-encode 20 bytes (an info hash or peer id) directly into `output`
/// as 20 `%xy` triplets with lowercase hex digits (60 bytes total).
///
/// Writing into the caller-provided `Write` avoids allocating an
/// intermediate `Vec` per call. Encodes each nibble via a lookup table in a
/// single pass instead of calling `hex::encode_to_slice` once per byte.
pub fn urlencode_20_bytes(input: [u8; 20], output: &mut impl Write) {
    const HEX_DIGITS: &[u8; 16] = b"0123456789abcdef";

    for byte in input.iter() {
        let triplet = [
            b'%',
            HEX_DIGITS[(byte >> 4) as usize],
            HEX_DIGITS[(byte & 0x0f) as usize],
        ];

        // `write_all` instead of `write`: `write` may perform a short write
        // and its Result was previously discarded, silently corrupting the
        // request on error. Writing to the in-memory buffers used by callers
        // cannot fail, so an unexpected error is a bug worth panicking on.
        output.write_all(&triplet).unwrap();
    }
}
@ -86,4 +83,38 @@ pub fn serialize_response_peers_ipv6<S>(
}
serializer.serialize_bytes(&bytes)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `urlencode_20_bytes` must emit exactly one `%xy` triplet of lowercase
    /// hex per input byte — 60 bytes in total.
    ///
    /// The previous version filled the input with `i % 10`, so the reference
    /// value `input[i] + 48` only held for byte values 0–9: the high nibble
    /// and the letter digits a–f were never exercised. Use bytes spread over
    /// the whole u8 range and an independent `format!`-based reference.
    #[test]
    fn test_urlencode_20_bytes() {
        let mut input = [0u8; 20];
        for (i, b) in input.iter_mut().enumerate() {
            // Deterministic values covering high nibbles and a-f digits.
            *b = (i as u8).wrapping_mul(13).wrapping_add(0xa7);
        }

        let mut output = Vec::new();
        urlencode_20_bytes(input, &mut output);

        // 20 bytes -> 20 triplets of '%' plus two hex digits.
        assert_eq!(output.len(), 60);

        for (i, chunk) in output.chunks_exact(3).enumerate() {
            let expected = format!("%{:02x}", input[i]);
            assert_eq!(chunk, expected.as_bytes(), "failing index: {}", i);
        }
    }
}