WIP: cleanup aquatic_http_load_test and aquatic_http_protocol

This commit is contained in:
Joakim Frostegård 2020-07-20 14:48:19 +02:00
parent d1e9d24773
commit da3b2bcd8c
10 changed files with 232 additions and 410 deletions

2
Cargo.lock generated
View file

@ -104,7 +104,6 @@ dependencies = [
"anyhow", "anyhow",
"aquatic_cli_helpers", "aquatic_cli_helpers",
"aquatic_http_protocol", "aquatic_http_protocol",
"crossbeam-channel",
"hashbrown", "hashbrown",
"mimalloc", "mimalloc",
"mio", "mio",
@ -113,7 +112,6 @@ dependencies = [
"rand", "rand",
"rand_distr", "rand_distr",
"serde", "serde",
"socket2",
] ]
[[package]] [[package]]

View file

@ -12,14 +12,12 @@ name = "aquatic_http_load_test"
anyhow = "1" anyhow = "1"
aquatic_cli_helpers = { path = "../aquatic_cli_helpers" } aquatic_cli_helpers = { path = "../aquatic_cli_helpers" }
aquatic_http_protocol = { path = "../aquatic_http_protocol" } aquatic_http_protocol = { path = "../aquatic_http_protocol" }
crossbeam-channel = "0.4"
hashbrown = { version = "0.7", features = ["serde"] } hashbrown = { version = "0.7", features = ["serde"] }
mimalloc = { version = "0.1", default-features = false } mimalloc = { version = "0.1", default-features = false }
mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] } mio = { version = "0.7", features = ["udp", "os-poll", "os-util"] }
rand = { version = "0.7", features = ["small_rng"] } rand = { version = "0.7", features = ["small_rng"] }
rand_distr = "0.2" rand_distr = "0.2"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
socket2 = { version = "0.3", features = ["reuseport"] }
[dev-dependencies] [dev-dependencies]
quickcheck = "0.9" quickcheck = "0.9"

View file

@ -1,7 +1,5 @@
use std::net::SocketAddr;
use std::sync::{Arc, atomic::AtomicUsize}; use std::sync::{Arc, atomic::AtomicUsize};
use serde::{Serialize, Deserialize};
use rand_distr::Pareto; use rand_distr::Pareto;
pub use aquatic_http_protocol::common::*; pub use aquatic_http_protocol::common::*;
@ -13,136 +11,6 @@ pub use aquatic_http_protocol::request::*;
pub struct ThreadId(pub u8); pub struct ThreadId(pub u8);
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
/// Server address
pub server_address: SocketAddr,
/// Number of sockets and socket worker threads
///
/// Sockets will bind to one port each, and with
/// multiple_client_ips = true, additionally to one IP each.
pub num_socket_workers: u8,
/// Number of workers generating requests from responses, as well as
/// requests not connected to previous ones.
pub num_request_workers: usize,
/// Run duration (quit and generate report after this many seconds)
pub duration: usize,
pub network: NetworkConfig,
pub handler: HandlerConfig,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct NetworkConfig {
/// True means bind to one localhost IP per socket. On macOS, this by
/// default causes all server responses to go to one socket worker.
/// Default option ("true") can cause issues on macOS.
///
/// The point of multiple IPs is to possibly cause a better distribution
/// of requests to servers with SO_REUSEPORT option.
pub multiple_client_ips: bool,
/// Use Ipv6 only
pub ipv6_client: bool,
/// Number of first client port
pub first_port: u16,
/// Socket worker poll timeout in microseconds
pub poll_timeout: u64,
/// Socket worker polling event number
pub poll_event_capacity: usize,
/// Size of socket recv buffer. Use 0 for OS default.
///
/// This setting can have a big impact on dropped packages. It might
/// require changing system defaults. Some examples of commands to set
/// recommended values for different operating systems:
///
/// macOS:
/// $ sudo sysctl net.inet.udp.recvspace=6000000
/// $ sudo sysctl net.inet.udp.maxdgram=500000 # Not necessary, but recommended
/// $ sudo sysctl kern.ipc.maxsockbuf=8388608 # Not necessary, but recommended
///
/// Linux:
/// $ sudo sysctl -w net.core.rmem_max=104857600
/// $ sudo sysctl -w net.core.rmem_default=104857600
pub recv_buffer: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct HandlerConfig {
/// Number of torrents to simulate
pub number_of_torrents: usize,
/// Maximum number of torrents to ask about in scrape requests
pub scrape_max_torrents: usize,
/// Handler: max number of responses to collect for before processing
pub max_responses_per_iter: usize,
/// Probability that a generated request is an announce request, as part
/// of sum of the various weight arguments.
pub weight_announce: usize,
/// Probability that a generated request is a scrape request, as part
/// of sum of the various weight arguments.
pub weight_scrape: usize,
/// Handler: max microseconds to wait for single response from channel
pub channel_timeout: u64,
/// Pareto shape
///
/// Fake peers choose torrents according to Pareto distribution.
pub torrent_selection_pareto_shape: f64,
/// Probability that a generated peer is a seeder
pub peer_seeder_probability: f64,
/// Part of additional request creation calculation, meaning requests
/// which are not dependent on previous responses from server. Higher
/// means more.
pub additional_request_factor: f64,
}
impl Default for Config {
fn default() -> Self {
Self {
server_address: "127.0.0.1:3000".parse().unwrap(),
num_socket_workers: 1,
num_request_workers: 1,
duration: 0,
network: NetworkConfig::default(),
handler: HandlerConfig::default(),
}
}
}
impl Default for NetworkConfig {
fn default() -> Self {
Self {
multiple_client_ips: true,
ipv6_client: false,
first_port: 45_000,
poll_timeout: 276,
poll_event_capacity: 2_877,
recv_buffer: 6_000_000,
}
}
}
impl Default for HandlerConfig {
fn default() -> Self {
Self {
number_of_torrents: 10_000,
peer_seeder_probability: 0.25,
scrape_max_torrents: 50,
weight_announce: 5,
weight_scrape: 0,
additional_request_factor: 0.4,
max_responses_per_iter: 10_000,
channel_timeout: 200,
torrent_selection_pareto_shape: 2.0,
}
}
}
#[derive(PartialEq, Eq, Clone)] #[derive(PartialEq, Eq, Clone)]
pub struct TorrentPeer { pub struct TorrentPeer {
pub info_hash: InfoHash, pub info_hash: InfoHash,

View file

@ -0,0 +1,133 @@
use std::net::SocketAddr;
use serde::{Serialize, Deserialize};
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
/// Server address
pub server_address: SocketAddr,
/// Number of sockets and socket worker threads
///
/// Sockets will bind to one port each, and with
/// multiple_client_ips = true, additionally to one IP each.
pub num_socket_workers: u8,
/// Number of workers generating requests from responses, as well as
/// requests not connected to previous ones.
pub num_request_workers: usize,
/// Run duration (quit and generate report after this many seconds)
pub duration: usize,
pub network: NetworkConfig,
pub handler: HandlerConfig,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct NetworkConfig {
/// True means bind to one localhost IP per socket. On macOS, this by
/// default causes all server responses to go to one socket worker.
/// Default option ("true") can cause issues on macOS.
///
/// The point of multiple IPs is to possibly cause a better distribution
/// of requests to servers with SO_REUSEPORT option.
pub multiple_client_ips: bool,
/// Use Ipv6 only
pub ipv6_client: bool,
/// Number of first client port
pub first_port: u16,
/// Socket worker poll timeout in microseconds
pub poll_timeout: u64,
/// Socket worker polling event number
pub poll_event_capacity: usize,
/// Size of socket recv buffer. Use 0 for OS default.
///
/// This setting can have a big impact on dropped packages. It might
/// require changing system defaults. Some examples of commands to set
/// recommended values for different operating systems:
///
/// macOS:
/// $ sudo sysctl net.inet.udp.recvspace=6000000
/// $ sudo sysctl net.inet.udp.maxdgram=500000 # Not necessary, but recommended
/// $ sudo sysctl kern.ipc.maxsockbuf=8388608 # Not necessary, but recommended
///
/// Linux:
/// $ sudo sysctl -w net.core.rmem_max=104857600
/// $ sudo sysctl -w net.core.rmem_default=104857600
pub recv_buffer: usize,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
pub struct HandlerConfig {
/// Number of torrents to simulate
pub number_of_torrents: usize,
/// Maximum number of torrents to ask about in scrape requests
pub scrape_max_torrents: usize,
/// Handler: max number of responses to collect for before processing
pub max_responses_per_iter: usize,
/// Probability that a generated request is an announce request, as part
/// of sum of the various weight arguments.
pub weight_announce: usize,
/// Probability that a generated request is a scrape request, as part
/// of sum of the various weight arguments.
pub weight_scrape: usize,
/// Handler: max microseconds to wait for single response from channel
pub channel_timeout: u64,
/// Pareto shape
///
/// Fake peers choose torrents according to Pareto distribution.
pub torrent_selection_pareto_shape: f64,
/// Probability that a generated peer is a seeder
pub peer_seeder_probability: f64,
/// Part of additional request creation calculation, meaning requests
/// which are not dependent on previous responses from server. Higher
/// means more.
pub additional_request_factor: f64,
}
impl Default for Config {
fn default() -> Self {
Self {
server_address: "127.0.0.1:3000".parse().unwrap(),
num_socket_workers: 1,
num_request_workers: 1,
duration: 0,
network: NetworkConfig::default(),
handler: HandlerConfig::default(),
}
}
}
impl Default for NetworkConfig {
fn default() -> Self {
Self {
multiple_client_ips: true,
ipv6_client: false,
first_port: 45_000,
poll_timeout: 276,
poll_event_capacity: 2_877,
recv_buffer: 6_000_000,
}
}
}
impl Default for HandlerConfig {
fn default() -> Self {
Self {
number_of_torrents: 10_000,
peer_seeder_probability: 0.25,
scrape_max_torrents: 50,
weight_announce: 5,
weight_scrape: 0,
additional_request_factor: 0.4,
max_responses_per_iter: 10_000,
channel_timeout: 200,
torrent_selection_pareto_shape: 2.0,
}
}
}

View file

@ -1,191 +0,0 @@
use std::time::Duration;
use std::vec::Drain;
use crossbeam_channel::{Receiver, Sender};
use rand::distributions::WeightedIndex;
use rand::prelude::*;
use crate::common::*;
use crate::utils::*;
pub fn run_handler_thread(
config: &Config,
state: LoadTestState,
request_senders: Vec<Sender<Request>>,
response_receiver: Receiver<(ThreadId, Response)>,
){
let state = &state;
let mut rng1 = SmallRng::from_rng(thread_rng())
.expect("create SmallRng from thread_rng()");
let mut rng2 = SmallRng::from_rng(thread_rng())
.expect("create SmallRng from thread_rng()");
let timeout = Duration::from_micros(config.handler.channel_timeout);
let mut responses = Vec::new();
loop {
for i in 0..config.handler.max_responses_per_iter {
let response = if i == 0 {
match response_receiver.recv(){
Ok(r) => r,
Err(_) => break, // Really shouldn't happen
}
} else {
match response_receiver.recv_timeout(timeout){
Ok(r) => r,
Err(_) => break,
}
};
responses.push(response);
}
let requests = process_responses(
config,
&state,
&mut rng1,
responses.drain(..)
);
// Somewhat dubious heuristic for deciding how fast to create
// and send additional requests (requests not having anything
// to do with previously sent requests)
let num_additional_to_send = {
let num_additional_requests = requests.iter()
.map(|v| v.len())
.sum::<usize>() as f64;
let num_new_requests_per_socket = num_additional_requests /
config.num_socket_workers as f64;
((num_new_requests_per_socket / 1.2) * config.handler.additional_request_factor) as usize + 10
};
for (channel_index, new_requests) in requests.into_iter().enumerate(){
let channel = &request_senders[channel_index];
for _ in 0..num_additional_to_send {
let request = create_random_request(
&config,
&state,
&mut rng2,
);
channel.send(request)
.expect("send request to channel in handler worker");
}
for request in new_requests.into_iter(){
channel.send(request)
.expect("send request to channel in handler worker");
}
}
}
}
fn process_responses(
config: &Config,
state: &LoadTestState,
rng: &mut impl Rng,
responses: Drain<(ThreadId, Response)>
) -> Vec<Vec<Request>> {
let mut new_requests = Vec::with_capacity(
config.num_socket_workers as usize
);
for _ in 0..config.num_socket_workers {
new_requests.push(Vec::new());
}
for (socket_thread_id, response) in responses.into_iter() {
let new_request = create_random_request(config, state, rng);
new_requests[socket_thread_id.0 as usize].push(new_request);
}
new_requests
}
pub fn create_random_request(
config: &Config,
state: &LoadTestState,
rng: &mut impl Rng,
) -> Request {
let weights = vec![
config.handler.weight_announce as u32,
config.handler.weight_scrape as u32,
];
let items = vec![
RequestType::Announce,
RequestType::Scrape,
];
let dist = WeightedIndex::new(&weights)
.expect("random request weighted index");
match items[dist.sample(rng)] {
RequestType::Announce => create_announce_request(
config,
state,
rng,
),
RequestType::Scrape => create_scrape_request(
config,
state,
rng,
)
}
}
fn create_announce_request(
config: &Config,
state: &LoadTestState,
rng: &mut impl Rng,
) -> Request {
let (event, bytes_left) = {
if rng.gen_bool(config.handler.peer_seeder_probability) {
(AnnounceEvent::Completed, 0)
} else {
(AnnounceEvent::Started, 50)
}
};
let info_hash_index = select_info_hash_index(config, &state, rng);
Request::Announce(AnnounceRequest {
info_hash: state.info_hashes[info_hash_index],
peer_id: PeerId(rng.gen()),
bytes_left,
event,
key: None,
numwant: None,
compact: true,
port: rng.gen()
})
}
fn create_scrape_request(
config: &Config,
state: &LoadTestState,
rng: &mut impl Rng,
) -> Request {
let mut scrape_hashes = Vec::new();
for _ in 0..20 {
let info_hash_index = select_info_hash_index(config, &state, rng);
scrape_hashes.push(state.info_hashes[info_hash_index]);
}
Request::Scrape(ScrapeRequest {
info_hashes: scrape_hashes,
})
}

View file

@ -1,22 +1,18 @@
use std::net::{SocketAddr, Ipv4Addr, Ipv6Addr};
use std::thread; use std::thread;
use std::sync::{Arc, atomic::Ordering}; use std::sync::{Arc, atomic::Ordering};
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use crossbeam_channel::unbounded;
use rand::prelude::*; use rand::prelude::*;
use rand_distr::Pareto; use rand_distr::Pareto;
mod common; mod common;
mod handler; mod config;
mod network; mod network;
mod utils; mod utils;
use common::*; use common::*;
use utils::*; use config::*;
use handler::create_random_request;
use network::*; use network::*;
use handler::run_handler_thread;
#[global_allocator] #[global_allocator]
@ -43,7 +39,7 @@ fn run(config: Config) -> ::anyhow::Result<()> {
let mut rng = SmallRng::from_entropy(); let mut rng = SmallRng::from_entropy();
for _ in 0..config.handler.number_of_torrents { for _ in 0..config.handler.number_of_torrents {
info_hashes.push(generate_info_hash(&mut rng)); info_hashes.push(InfoHash(rng.gen()));
} }
let pareto = Pareto::new( let pareto = Pareto::new(
@ -59,12 +55,7 @@ fn run(config: Config) -> ::anyhow::Result<()> {
// Start socket workers // Start socket workers
let mut request_senders = Vec::new();
for _ in 0..config.num_socket_workers { for _ in 0..config.num_socket_workers {
let (sender, receiver) = unbounded();
request_senders.push(sender);
let config = config.clone(); let config = config.clone();
let state = state.clone(); let state = state.clone();
@ -72,22 +63,10 @@ fn run(config: Config) -> ::anyhow::Result<()> {
thread::spawn(move || run_socket_thread( thread::spawn(move || run_socket_thread(
&config, &config,
state, state,
receiver, 1
)); ));
} }
// Bootstrap request cycle by adding a request to each request channel
for sender in request_senders.iter(){
let request = create_random_request(
&config,
&state,
&mut thread_rng()
);
sender.send(request.into())
.expect("bootstrap: add initial request to request queue");
}
monitor_statistics( monitor_statistics(
state, state,
&config &config

View file

@ -1,49 +1,14 @@
use std::net::SocketAddr;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use std::time::Duration; use std::time::Duration;
use std::io::{Read, Write, ErrorKind}; use std::io::{Read, Write, ErrorKind};
use anyhow::Context;
use crossbeam_channel::{Receiver, Sender};
use hashbrown::HashMap; use hashbrown::HashMap;
use mio::{net::TcpStream, Events, Poll, Interest, Token}; use mio::{net::TcpStream, Events, Poll, Interest, Token};
use socket2::{Socket, Domain, Type, Protocol};
use rand::{rngs::SmallRng, prelude::*}; use rand::{rngs::SmallRng, prelude::*};
use crate::common::*; use crate::common::*;
use crate::handler::create_random_request; use crate::config::*;
use crate::utils::create_random_request;
const MAX_PACKET_SIZE: usize = 4096;
pub fn create_socket(
address: SocketAddr,
server_address: SocketAddr,
ipv6_only: bool
) -> ::anyhow::Result<TcpStream> {
let builder = if address.is_ipv4(){
Socket::new(Domain::ipv4(), Type::stream(), Some(Protocol::tcp()))
} else {
Socket::new(Domain::ipv6(), Type::stream(), Some(Protocol::tcp()))
}.context("Couldn't create socket2::Socket")?;
if ipv6_only {
builder.set_only_v6(true)
.context("Couldn't put socket in ipv6 only mode")?
}
builder.set_nonblocking(true)
.context("Couldn't put socket in non-blocking mode")?;
builder.set_reuse_port(true)
.context("Couldn't put socket in reuse_port mode")?;
builder.connect(&server_address.into()).with_context(||
format!("Couldn't connect to address {}", server_address)
)?;
Ok(TcpStream::from_std(builder.into_tcp_stream()))
}
@ -58,7 +23,6 @@ pub struct Connection {
impl Connection { impl Connection {
pub fn create_and_register( pub fn create_and_register(
config: &Config, config: &Config,
state: &LoadTestState,
connections: &mut ConnectionMap, connections: &mut ConnectionMap,
poll: &mut Poll, poll: &mut Poll,
token_counter: &mut usize, token_counter: &mut usize,
@ -188,7 +152,7 @@ pub type ConnectionMap = HashMap<usize, Connection>;
pub fn run_socket_thread( pub fn run_socket_thread(
config: &Config, config: &Config,
state: LoadTestState, state: LoadTestState,
request_receiver: Receiver<Request>, num_initial_requests: usize,
) { ) {
let timeout = Duration::from_micros(config.network.poll_timeout); let timeout = Duration::from_micros(config.network.poll_timeout);
@ -199,10 +163,9 @@ pub fn run_socket_thread(
let mut token_counter = 0usize; let mut token_counter = 0usize;
for _ in request_receiver.try_recv(){ for _ in 0..num_initial_requests {
Connection::create_and_register( Connection::create_and_register(
config, config,
&state,
&mut connections, &mut connections,
&mut poll, &mut poll,
&mut token_counter, &mut token_counter,
@ -238,7 +201,6 @@ pub fn run_socket_thread(
if token_counter < 1 && responses_received > 0 { if token_counter < 1 && responses_received > 0 {
Connection::create_and_register( Connection::create_and_register(
config, config,
&state,
&mut connections, &mut connections,
&mut poll, &mut poll,
&mut token_counter, &mut token_counter,

View file

@ -1,12 +1,94 @@
use std::sync::Arc; use std::sync::Arc;
use rand_distr::{Standard, Pareto}; use rand::distributions::WeightedIndex;
use rand_distr::Pareto;
use rand::prelude::*; use rand::prelude::*;
use crate::common::*; use crate::common::*;
use crate::config::*;
pub fn select_info_hash_index( pub fn create_random_request(
config: &Config,
state: &LoadTestState,
rng: &mut impl Rng,
) -> Request {
let weights = vec![
config.handler.weight_announce as u32,
config.handler.weight_scrape as u32,
];
let items = vec![
RequestType::Announce,
RequestType::Scrape,
];
let dist = WeightedIndex::new(&weights)
.expect("random request weighted index");
match items[dist.sample(rng)] {
RequestType::Announce => create_announce_request(
config,
state,
rng,
),
RequestType::Scrape => create_scrape_request(
config,
state,
rng,
)
}
}
fn create_announce_request(
config: &Config,
state: &LoadTestState,
rng: &mut impl Rng,
) -> Request {
let (event, bytes_left) = {
if rng.gen_bool(config.handler.peer_seeder_probability) {
(AnnounceEvent::Completed, 0)
} else {
(AnnounceEvent::Started, 50)
}
};
let info_hash_index = select_info_hash_index(config, &state, rng);
Request::Announce(AnnounceRequest {
info_hash: state.info_hashes[info_hash_index],
peer_id: PeerId(rng.gen()),
bytes_left,
event,
key: None,
numwant: None,
compact: true,
port: rng.gen()
})
}
fn create_scrape_request(
config: &Config,
state: &LoadTestState,
rng: &mut impl Rng,
) -> Request {
let mut scrape_hashes = Vec::new();
for _ in 0..20 {
let info_hash_index = select_info_hash_index(config, &state, rng);
scrape_hashes.push(state.info_hashes[info_hash_index]);
}
Request::Scrape(ScrapeRequest {
info_hashes: scrape_hashes,
})
}
fn select_info_hash_index(
config: &Config, config: &Config,
state: &LoadTestState, state: &LoadTestState,
rng: &mut impl Rng, rng: &mut impl Rng,
@ -15,7 +97,7 @@ pub fn select_info_hash_index(
} }
pub fn pareto_usize( fn pareto_usize(
rng: &mut impl Rng, rng: &mut impl Rng,
pareto: &Arc<Pareto<f64>>, pareto: &Arc<Pareto<f64>>,
max: usize, max: usize,
@ -24,9 +106,4 @@ pub fn pareto_usize(
let p = (p.min(101.0f64) - 1.0) / 100.0; let p = (p.min(101.0f64) - 1.0) / 100.0;
(p * max as f64) as usize (p * max as f64) as usize
} }
pub fn generate_info_hash(rng: &mut impl Rng) -> InfoHash {
InfoHash(rng.gen())
}

View file

@ -1,5 +1,3 @@
use std::io::Write;
use anyhow::Context; use anyhow::Context;
use hashbrown::HashMap; use hashbrown::HashMap;
use smartstring::{SmartString, LazyCompact}; use smartstring::{SmartString, LazyCompact};
@ -33,19 +31,19 @@ impl AnnounceRequest {
bytes.extend_from_slice(&urlencode_20_bytes(self.peer_id.0)); bytes.extend_from_slice(&urlencode_20_bytes(self.peer_id.0));
bytes.extend_from_slice(b"&port="); bytes.extend_from_slice(b"&port=");
itoa::write(&mut bytes, self.port); let _ = itoa::write(&mut bytes, self.port);
bytes.extend_from_slice(b"&left="); bytes.extend_from_slice(b"&left=");
itoa::write(&mut bytes, self.bytes_left); let _ = itoa::write(&mut bytes, self.bytes_left);
bytes.extend_from_slice(b"&event=started"); // FIXME bytes.extend_from_slice(b"&event=started"); // FIXME
bytes.extend_from_slice(b"&compact="); bytes.extend_from_slice(b"&compact=");
itoa::write(&mut bytes, self.compact as u8); let _ = itoa::write(&mut bytes, self.compact as u8);
if let Some(numwant) = self.numwant { if let Some(numwant) = self.numwant {
bytes.extend_from_slice(b"&numwant="); bytes.extend_from_slice(b"&numwant=");
itoa::write(&mut bytes, numwant); let _ = itoa::write(&mut bytes, numwant);
} }
if let Some(ref key) = self.key { if let Some(ref key) = self.key {

View file

@ -9,7 +9,7 @@ use super::response::ResponsePeer;
pub fn urlencode_20_bytes(input: [u8; 20]) -> Vec<u8> { pub fn urlencode_20_bytes(input: [u8; 20]) -> Vec<u8> {
let mut tmp = [0u8; 40]; let mut tmp = [0u8; 40];
hex::encode_to_slice(&input, &mut tmp); hex::encode_to_slice(&input, &mut tmp).unwrap();
let mut output = Vec::with_capacity(60); let mut output = Vec::with_capacity(60);