aquatic_http: move protocol module to new crate aquatic_http_protocol

This commit is contained in:
Joakim Frostegård 2020-07-19 21:59:31 +02:00
parent 4caf174da5
commit 4ac2012a2a
36 changed files with 65 additions and 24 deletions

View file

@ -13,22 +13,12 @@ path = "src/lib/lib.rs"
name = "aquatic_http"
path = "src/bin/main.rs"
[[bench]]
name = "bench_request_from_path"
path = "benches/bench_request_from_path.rs"
harness = false
[[bench]]
name = "bench_announce_response_to_bytes"
path = "benches/bench_announce_response_to_bytes.rs"
harness = false
[dependencies]
anyhow = "1"
aquatic_cli_helpers = { path = "../aquatic_cli_helpers" }
aquatic_common = { path = "../aquatic_common" }
aquatic_common_tcp = { path = "../aquatic_common_tcp" }
bendy = { version = "0.3", features = ["std", "serde"] }
aquatic_http_protocol = { path = "../aquatic_http_protocol" }
either = "1"
flume = "0.7"
hashbrown = { version = "0.7", features = ["serde"] }
@ -48,6 +38,5 @@ simplelog = "0.8"
smartstring = "0.2"
[dev-dependencies]
criterion = "0.3"
quickcheck = "0.9"
quickcheck_macros = "0.9"

View file

@ -1,42 +0,0 @@
use std::net::Ipv4Addr;
use std::time::Duration;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use aquatic_http::protocol::response::*;
/// Benchmark serializing an announce response holding 100 IPv4 peers
/// (and no IPv6 peers) into its bencoded byte representation.
pub fn bench(c: &mut Criterion) {
    // 100 distinct peers: addresses 127.0.0.0..=127.0.0.99, ports 0..=99.
    let peers: Vec<_> = (0..100)
        .map(|i| ResponsePeer {
            ip_address: Ipv4Addr::new(127, 0, 0, i),
            port: i as u16,
        })
        .collect();

    let response = Response::Announce(AnnounceResponse {
        announce_interval: 120,
        complete: 100,
        incomplete: 500,
        peers: ResponsePeerListV4(peers),
        peers6: ResponsePeerListV6(Vec::new()),
    });

    c.bench_function("announce-response-to-bytes", |b| {
        b.iter(|| Response::to_bytes(black_box(&response)))
    });
}
// Criterion harness configuration: large sample count and long measurement
// time for stable estimates, and a strict significance level to reduce
// false positives when comparing runs.
criterion_group!{
name = benches;
config = Criterion::default()
.sample_size(1000)
.measurement_time(Duration::from_secs(180))
.significance_level(0.01);
targets = bench
}
criterion_main!(benches);

View file

@ -1,24 +0,0 @@
use std::time::Duration;
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use aquatic_http::protocol::request::Request;
// Representative announce GET path; info_hash is URL-encoded byte-by-byte
// (e.g. %04 for byte 0x04), and the path includes keys the parser ignores
// (uploaded, downloaded, supportcrypto).
static INPUT: &str = "/announce?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9&peer_id=-TR2940-5ert69muw5t8&port=11000&uploaded=0&downloaded=0&left=0&numwant=0&key=3ab4b977&compact=1&supportcrypto=1&event=stopped";
/// Benchmark parsing a full announce GET path into a `Request`.
pub fn bench(c: &mut Criterion) {
c.bench_function("request-from-path", |b| b.iter(||
Request::from_http_get_path(black_box(INPUT))
));
}
// Criterion harness configuration: large sample count and long measurement
// time for stable estimates; strict significance level.
criterion_group!{
name = benches;
config = Criterion::default()
.sample_size(1000)
.measurement_time(Duration::from_secs(180))
.significance_level(0.01);
targets = bench
}
criterion_main!(benches);

View file

@ -12,9 +12,9 @@ use smartstring::{SmartString, LazyCompact};
pub use aquatic_common::{ValidUntil, convert_ipv4_mapped_ipv4};
use crate::protocol::common::*;
use crate::protocol::request::Request;
use crate::protocol::response::{Response, ResponsePeer};
use aquatic_http_protocol::common::*;
use aquatic_http_protocol::request::Request;
use aquatic_http_protocol::response::{Response, ResponsePeer};
pub trait Ip: Copy + Eq + ::std::hash::Hash {}

View file

@ -8,12 +8,12 @@ use parking_lot::MutexGuard;
use rand::{Rng, SeedableRng, rngs::SmallRng};
use aquatic_common::extract_response_peers;
use aquatic_http_protocol::request::*;
use aquatic_http_protocol::response::*;
use crate::common::*;
use crate::config::Config;
use crate::protocol::request::*;
use crate::protocol::response::*;
pub fn run_request_worker(

View file

@ -12,7 +12,6 @@ pub mod common;
pub mod config;
pub mod handler;
pub mod network;
pub mod protocol;
pub mod tasks;
use common::*;

View file

@ -9,9 +9,9 @@ use mio::net::TcpStream;
use native_tls::{TlsAcceptor, MidHandshakeTlsStream};
use aquatic_common_tcp::network::stream::Stream;
use aquatic_http_protocol::request::Request;
use crate::common::*;
use crate::protocol::request::Request;
#[derive(Debug)]

View file

@ -12,10 +12,10 @@ use mio::{Events, Poll, Interest, Token};
use mio::net::TcpListener;
use aquatic_common_tcp::network::utils::create_listener;
use aquatic_http_protocol::response::*;
use crate::common::*;
use crate::config::Config;
use crate::protocol::response::*;
use connection::*;

View file

@ -1,71 +0,0 @@
use std::str::FromStr;
use serde::Serialize;
use super::utils::*;
/// Raw 20-byte peer id as sent in announce requests. Serialized as a
/// byte string (not an integer list) via `serialize_20_bytes`.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize)]
#[serde(transparent)]
pub struct PeerId(
#[serde(
serialize_with = "serialize_20_bytes",
)]
pub [u8; 20]
);
/// Raw 20-byte torrent info hash. `Ord` is required so it can key the
/// `BTreeMap` in scrape responses; serialized as a byte string via
/// `serialize_20_bytes`.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize)]
#[serde(transparent)]
pub struct InfoHash(
#[serde(
serialize_with = "serialize_20_bytes",
)]
pub [u8; 20]
);
/// The `event` field of an announce request. `Empty` is the default when
/// the client sends no event.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AnnounceEvent {
Started,
Stopped,
Completed,
Empty
}
impl Default for AnnounceEvent {
fn default() -> Self {
Self::Empty
}
}
/// Parse the announce `event` query value ("started", "stopped",
/// "completed" or "empty") into its enum variant; anything else errors.
impl FromStr for AnnounceEvent {
    type Err = String;

    fn from_str(value: &str) -> std::result::Result<Self, String> {
        let event = match value {
            "started" => Self::Started,
            "stopped" => Self::Stopped,
            "completed" => Self::Completed,
            "empty" => Self::Empty,
            other => return Err(format!("Unknown value: {}", other)),
        };

        Ok(event)
    }
}
#[cfg(test)]
impl quickcheck::Arbitrary for InfoHash {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
// Only the first and last two bytes are randomized; the middle stays
// b'x'. NOTE(review): presumably enough variation for map-key ordering
// tests while keeping generated values compact — confirm intent.
let mut arr = [b'x'; 20];
arr[0] = u8::arbitrary(g);
arr[1] = u8::arbitrary(g);
arr[18] = u8::arbitrary(g);
arr[19] = u8::arbitrary(g);
Self(arr)
}
}

View file

@ -1,4 +0,0 @@
pub mod common;
pub mod request;
pub mod response;
mod utils;

View file

@ -1,323 +0,0 @@
use anyhow::Context;
use hashbrown::HashMap;
use smartstring::{SmartString, LazyCompact};
use super::common::*;
use super::utils::*;
/// Parsed announce request (`GET /announce?...`).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct AnnounceRequest {
pub info_hash: InfoHash,
pub peer_id: PeerId,
pub port: u16,
// Parsed from the "left" query key
pub bytes_left: usize,
pub event: AnnounceEvent,
pub compact: bool,
/// Number of response peers wanted
pub numwant: Option<usize>,
// Optional opaque client key, capped at 100 bytes by the parser
pub key: Option<SmartString<LazyCompact>>,
}
/// Parsed scrape request; may carry multiple info hashes.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ScrapeRequest {
pub info_hashes: Vec<InfoHash>,
}
/// A request parsed from an HTTP GET path: either announce or scrape.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Request {
Announce(AnnounceRequest),
Scrape(ScrapeRequest),
}
impl Request {
/// Parse Request from http path (GET `/announce?info_hash=...`)
///
/// Existing serde-url decode crates were insufficient, so the decision was
/// made to create a custom parser. serde_urlencoded doesn't support multiple
/// values with same key, and serde_qs pulls in lots of dependencies. Both
/// would need preprocessing for the binary format used for info_hash and
/// peer_id.
///
/// Returns `Request::Announce` when the location is exactly "/announce".
/// NOTE(review): any other location (not only "/scrape") is parsed as a
/// scrape request — the location is never validated.
pub fn from_http_get_path(path: &str) -> anyhow::Result<Self> {
::log::debug!("request GET path: {}", path);
// Split into location and query string; both must be present.
let mut split_parts= path.splitn(2, '?');
let location = split_parts.next()
.with_context(|| "no location")?;
let query_string = split_parts.next()
.with_context(|| "no query string")?;
// info_hash may appear several times (scrape); all other recognized
// keys land in `data`, keyed by subslices of `query_string`.
let mut info_hashes = Vec::new();
let mut data = HashMap::new();
Self::parse_key_value_pairs_memchr(
&mut info_hashes,
&mut data,
query_string
)?;
if location == "/announce" {
// Optional peer count; must parse as usize when present.
let numwant = if let Some(s) = data.remove("numwant"){
let numwant = s.parse::<usize>()
.map_err(|err|
anyhow::anyhow!("parse 'numwant': {}", err)
)?;
Some(numwant)
} else {
None
};
// Optional opaque client key, length-capped at 100.
let key = if let Some(s) = data.remove("key"){
if s.len() > 100 {
return Err(anyhow::anyhow!("'key' is too long"))
}
Some(s)
} else {
None
};
// "port" and "left" are mandatory for announce requests.
let port = if let Some(port) = data.remove("port"){
port.parse().with_context(|| "parse port")?
} else {
return Err(anyhow::anyhow!("no port"));
};
let bytes_left = if let Some(left) = data.remove("left"){
left.parse().with_context(|| "parse bytes left")?
} else {
return Err(anyhow::anyhow!("no left"));
};
// "event" defaults to Empty when absent; unparseable values error.
let event = if let Some(event) = data.remove("event"){
if let Ok(event) = event.parse(){
event
} else {
return Err(anyhow::anyhow!("invalid event: {}", event));
}
} else {
AnnounceEvent::default()
};
// Only compact responses are supported: absent means compact,
// any value other than "1" is rejected.
let compact = if let Some(compact) = data.remove("compact"){
if compact.as_str() == "1" {
true
} else {
return Err(anyhow::anyhow!("compact set, but not to 1"));
}
} else {
true
};
// info_hash and peer_id decode to 20 raw bytes each.
// NOTE(review): `pop` uses the LAST info_hash if several were sent.
let request = AnnounceRequest {
info_hash: info_hashes.pop()
.with_context(|| "no info_hash")
.and_then(deserialize_20_bytes)
.map(InfoHash)?,
peer_id: data.remove("peer_id")
.with_context(|| "no peer_id")
.and_then(deserialize_20_bytes)
.map(PeerId)?,
port,
bytes_left,
event,
compact,
numwant,
key,
};
Ok(Request::Announce(request))
} else {
// Scrape: decode every collected info_hash to its 20 raw bytes.
let mut parsed_info_hashes = Vec::with_capacity(info_hashes.len());
for info_hash in info_hashes {
parsed_info_hashes.push(InfoHash(deserialize_20_bytes(info_hash)?));
}
let request = ScrapeRequest {
info_hashes: parsed_info_hashes,
};
Ok(Request::Scrape(request))
}
}
/// Seems to be somewhat faster than non-memchr version
///
/// Scans the query string for '=' with memchr; each segment's end is
/// taken from a parallel iterator over '&' positions.
/// NOTE(review): this pairing assumes exactly one '=' per '&'-separated
/// segment; a literal '=' inside a value would desynchronize the two
/// iterators — confirm such input cannot occur.
fn parse_key_value_pairs_memchr<'a>(
info_hashes: &mut Vec<SmartString<LazyCompact>>,
data: &mut HashMap<&'a str, SmartString<LazyCompact>>,
query_string: &'a str,
) -> anyhow::Result<()> {
let query_string_bytes = query_string.as_bytes();
let mut ampersand_iter = ::memchr::memchr_iter(b'&', query_string_bytes);
let mut position = 0usize;
for equal_sign_index in ::memchr::memchr_iter(b'=', query_string_bytes){
// Segment ends at the next '&', or at end of string for the last pair.
let segment_end = ampersand_iter.next()
.unwrap_or(query_string.len());
let key = query_string.get(position..equal_sign_index)
.with_context(|| format!("no key at {}..{}", position, equal_sign_index))?;
let value = query_string.get(equal_sign_index + 1..segment_end)
.with_context(|| format!("no value at {}..{}", equal_sign_index + 1, segment_end))?;
// whitelist keys to avoid having to use ddos-resistant hashmap
match key {
"info_hash" => {
let value = Self::urldecode_memchr(value)?;
info_hashes.push(value);
},
"peer_id" | "port" | "left" | "event" | "compact" | "numwant" | "key" => {
let value = Self::urldecode_memchr(value)?;
data.insert(key, value);
},
k => {
::log::info!("ignored unrecognized key: {}", k)
}
}
if segment_end == query_string.len(){
break
} else {
position = segment_end + 1;
}
}
Ok(())
}
/// The info hashes and peer id's that are received are url-encoded byte
/// by byte, e.g., %fa for byte 0xfa. However, they need to be parsed as
/// UTF-8 string, meaning that non-ascii bytes are invalid characters.
/// Therefore, these bytes must be converted to their equivalent multi-byte
/// UTF-8 encodings.
///
/// NOTE(review): not referenced anywhere in this impl — `urldecode_memchr`
/// below is the variant actually used. Candidate for removal.
fn urldecode(value: &str) -> anyhow::Result<String> {
let mut processed = String::new();
// The part before the first '%' is copied verbatim (i == 0); every
// later split part begins with the two hex digits of an escape.
for (i, part) in value.split('%').enumerate(){
if i == 0 {
processed.push_str(part);
} else if part.len() >= 2 {
let mut two_first = String::with_capacity(2);
for (j, c) in part.chars().enumerate(){
if j == 0 {
two_first.push(c);
} else if j == 1 {
two_first.push(c);
// Decode the two hex digits and push the byte as a char
// (bytes >= 0x80 thus become multi-byte UTF-8).
let byte = u8::from_str_radix(&two_first, 16)?;
processed.push(byte as char);
} else {
processed.push(c);
}
}
} else {
return Err(anyhow::anyhow!(
"url decode: too few characters in '%{}'", part
))
}
}
Ok(processed)
}
/// Quite a bit faster than non-memchr version
///
/// Locates '%' escapes with memchr. Each escape must be followed by two
/// ASCII bytes; the decoded byte is pushed as a char, so bytes >= 0x80
/// become multi-byte UTF-8 (the inverse of `deserialize_20_bytes`).
fn urldecode_memchr(value: &str) -> anyhow::Result<SmartString<LazyCompact>> {
let mut processed = SmartString::new();
let bytes = value.as_bytes();
let iter = ::memchr::memchr_iter(b'%', bytes);
// Index just past the last decoded escape; literal text between
// escapes is copied over as slices.
let mut str_index_after_hex = 0usize;
for i in iter {
match (bytes.get(i), bytes.get(i + 1), bytes.get(i + 2)){
(Some(0..=127), Some(0..=127), Some(0..=127)) => {
if i > 0 {
processed.push_str(&value[str_index_after_hex..i]);
}
str_index_after_hex = i + 3;
let hex = &value[i + 1..i + 3];
let byte = u8::from_str_radix(&hex, 16)?;
processed.push(byte as char);
},
_ => {
return Err(anyhow::anyhow!(
"invalid urlencoded segment at byte {} in {}", i, value
));
}
}
}
// Copy any trailing literal text after the final escape.
if let Some(rest_of_str) = value.get(str_index_after_hex..){
processed.push_str(rest_of_str);
}
Ok(processed)
}
}
#[cfg(test)]
mod tests {
use super::*;
// Announce GET path with url-encoded binary info_hash/peer_id plus keys
// the parser ignores (uploaded, downloaded, supportcrypto).
static ANNOUNCE_REQUEST_PATH: &str = "/announce?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9&peer_id=-ABC940-5ert69muw5t8&port=12345&uploaded=0&downloaded=0&left=1&numwant=0&key=4ab4b877&compact=1&supportcrypto=1&event=started";
static SCRAPE_REQUEST_PATH: &str = "/scrape?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9";
// Raw bytes expected after url-decoding the info_hash/peer_id above.
static REFERENCE_INFO_HASH: [u8; 20] = [0x04, 0x0b, b'k', b'V', 0x3f, 0x5c, b'r', 0x14, 0xa6, 0xb7, 0x98, 0xad, b'C', 0xc3, 0xc9, b'.', 0x40, 0x24, 0x00, 0xb9];
static REFERENCE_PEER_ID: [u8; 20] = [b'-', b'A', b'B', b'C', b'9', b'4', b'0', b'-', b'5', b'e', b'r', b't', b'6', b'9', b'm', b'u', b'w', b'5', b't', b'8'];
// Decoding of escapes, passthrough of plain text, and rejection of
// truncated or non-hex escape sequences.
#[test]
fn test_urldecode(){
let f = Request::urldecode_memchr;
assert_eq!(f("").unwrap(), "".to_string());
assert_eq!(f("abc").unwrap(), "abc".to_string());
assert_eq!(f("%21").unwrap(), "!".to_string());
assert_eq!(f("%21%3D").unwrap(), "!=".to_string());
assert_eq!(f("abc%21def%3Dghi").unwrap(), "abc!def=ghi".to_string());
assert!(f("%").is_err());
assert!(f("%å7").is_err());
}
// Full announce path round-trips into the expected AnnounceRequest.
#[test]
fn test_announce_request_from_path(){
let parsed_request = Request::from_http_get_path(
ANNOUNCE_REQUEST_PATH
).unwrap();
let reference_request = Request::Announce(AnnounceRequest {
info_hash: InfoHash(REFERENCE_INFO_HASH),
peer_id: PeerId(REFERENCE_PEER_ID),
port: 12345,
bytes_left: 1,
event: AnnounceEvent::Started,
compact: true,
numwant: Some(0),
key: Some("4ab4b877".into())
});
assert_eq!(parsed_request, reference_request);
}
// Scrape path yields a ScrapeRequest with the decoded info hash.
#[test]
fn test_scrape_request_from_path(){
let parsed_request = Request::from_http_get_path(
SCRAPE_REQUEST_PATH
).unwrap();
let reference_request = Request::Scrape(ScrapeRequest {
info_hashes: vec![InfoHash(REFERENCE_INFO_HASH)],
});
assert_eq!(parsed_request, reference_request);
}
}

View file

@ -1,317 +0,0 @@
use std::net::{Ipv4Addr, Ipv6Addr};
use std::collections::BTreeMap;
use serde::Serialize;
use super::common::*;
use super::utils::*;
/// A peer address/port pair in a response; `I` is `Ipv4Addr` or `Ipv6Addr`.
#[derive(Debug, Clone, Serialize)]
pub struct ResponsePeer<I>{
pub ip_address: I,
pub port: u16
}
/// IPv4 peer list, serialized to the compact byte-string format by
/// `serialize_response_peers_ipv4`.
#[derive(Debug, Clone, Serialize)]
#[serde(transparent)]
pub struct ResponsePeerListV4(
#[serde(serialize_with = "serialize_response_peers_ipv4")]
pub Vec<ResponsePeer<Ipv4Addr>>
);
/// IPv6 peer list, serialized to the compact byte-string format by
/// `serialize_response_peers_ipv6`.
#[derive(Debug, Clone, Serialize)]
#[serde(transparent)]
pub struct ResponsePeerListV6(
#[serde(serialize_with = "serialize_response_peers_ipv6")]
pub Vec<ResponsePeer<Ipv6Addr>>
);
/// Per-torrent counters returned in scrape responses.
#[derive(Debug, Clone, Serialize)]
pub struct ScrapeStatistics {
pub complete: usize,
pub incomplete: usize,
pub downloaded: usize,
}
/// Response to an announce request.
#[derive(Debug, Clone, Serialize)]
pub struct AnnounceResponse {
// Serialized under the wire name "interval" (seconds between announces).
#[serde(rename = "interval")]
pub announce_interval: usize,
pub complete: usize,
pub incomplete: usize,
pub peers: ResponsePeerListV4,
pub peers6: ResponsePeerListV6,
}
impl AnnounceResponse {
/// Hand-rolled bencode serialization of an announce response.
///
/// Emits a dict with keys in sorted order — complete, incomplete,
/// interval, peers, peers6 — with the peer lists as compact byte strings
/// (6 bytes per IPv4 peer, 18 per IPv6 peer). The quickcheck test in this
/// file checks the output matches bendy's serde serializer byte-for-byte.
fn to_bytes(&self) -> Vec<u8> {
let peers_bytes_len = self.peers.0.len() * 6;
let peers6_bytes_len = self.peers6.0.len() * 18;
// Capacity: fixed key/delimiter text plus upper estimates for the
// integer digits, plus the exact compact peer payload sizes.
let mut bytes = Vec::with_capacity(
12 +
5 + // Upper estimate
15 +
5 + // Upper estimate
12 +
5 + // Upper estimate
8 +
peers_bytes_len +
8 +
peers6_bytes_len +
1
);
bytes.extend_from_slice(b"d8:completei");
let _ = itoa::write(&mut bytes, self.complete);
bytes.extend_from_slice(b"e10:incompletei");
let _ = itoa::write(&mut bytes, self.incomplete);
bytes.extend_from_slice(b"e8:intervali");
let _ = itoa::write(&mut bytes, self.announce_interval);
// "peers": length-prefixed byte string of 4 address bytes + 2
// big-endian port bytes per peer.
bytes.extend_from_slice(b"e5:peers");
let _ = itoa::write(&mut bytes, peers_bytes_len);
bytes.push(b':');
for peer in self.peers.0.iter() {
bytes.extend_from_slice(&u32::from(peer.ip_address).to_be_bytes());
bytes.extend_from_slice(&peer.port.to_be_bytes())
}
// "peers6": 16 address bytes + 2 big-endian port bytes per peer.
bytes.extend_from_slice(b"6:peers6");
let _ = itoa::write(&mut bytes, peers6_bytes_len);
bytes.push(b':');
for peer in self.peers6.0.iter() {
bytes.extend_from_slice(&u128::from(peer.ip_address).to_be_bytes());
bytes.extend_from_slice(&peer.port.to_be_bytes())
}
bytes.push(b'e');
bytes
}
}
/// Response to a scrape request: statistics keyed by info hash.
#[derive(Debug, Clone, Serialize)]
pub struct ScrapeResponse {
/// BTreeMap instead of HashMap since keys need to be serialized in order
pub files: BTreeMap<InfoHash, ScrapeStatistics>,
}
impl ScrapeResponse {
    /// Hand-rolled bencode serialization of a scrape response:
    /// `d5:filesd20:<info hash>d8:completei..e10:downloadedi..e10:incompletei..ee..ee`,
    /// with dict keys in sorted order (complete, downloaded, incomplete) to
    /// match what a serde-based bencode serializer produces.
    fn to_bytes(&self) -> Vec<u8> {
        // Capacity per entry: "20:" + hash + "d8:completei" + digits +
        // "e10:downloadedi" + digits + "e10:incompletei" + digits + "ee",
        // plus the outer "d5:filesd" and closing "ee".
        let mut bytes = Vec::with_capacity(
            9 +
            self.files.len() * (
                3 +
                20 +
                12 +
                5 + // Upper estimate
                15 +
                5 + // Upper estimate
                15 +
                5 + // Upper estimate
                2
            ) +
            2
        );
        bytes.extend_from_slice(b"d5:filesd");
        for (info_hash, statistics) in self.files.iter(){
            bytes.extend_from_slice(b"20:");
            bytes.extend_from_slice(&info_hash.0);
            bytes.extend_from_slice(b"d8:completei");
            let _ = itoa::write(&mut bytes, statistics.complete);
            // Was previously hard-coded to "downloadedi0e", silently
            // discarding the `downloaded` value carried by the struct.
            bytes.extend_from_slice(b"e10:downloadedi");
            let _ = itoa::write(&mut bytes, statistics.downloaded);
            bytes.extend_from_slice(b"e10:incompletei");
            let _ = itoa::write(&mut bytes, statistics.incomplete);
            bytes.extend_from_slice(b"ee");
        }
        bytes.extend_from_slice(b"ee");
        bytes
    }
}
/// Error response carrying only a failure reason string.
#[derive(Debug, Clone, Serialize)]
pub struct FailureResponse {
pub failure_reason: String,
}
impl FailureResponse {
/// Bencode a failure response: `d14:failure_reason<len>:<reason>e`.
fn to_bytes(&self) -> Vec<u8> {
let reason_bytes = self.failure_reason.as_bytes();
// 18 bytes of fixed prefix ('d' + "14:failure_reason"), up to ~3
// digits of length, ':', the reason itself, and the closing 'e'.
let mut bytes = Vec::with_capacity(
18 +
3 + // Upper estimate
1 +
reason_bytes.len() +
1
);
bytes.extend_from_slice(b"d14:failure_reason");
let _ = itoa::write(&mut bytes, reason_bytes.len());
bytes.push(b':');
bytes.extend_from_slice(reason_bytes);
bytes.push(b'e');
bytes
}
}
/// A response to send to a peer: announce, scrape or failure.
/// `untagged` so serde serializes only the inner struct's dict, with no
/// enum wrapper.
#[derive(Debug, Clone, Serialize)]
#[serde(untagged)]
pub enum Response {
Announce(AnnounceResponse),
Scrape(ScrapeResponse),
Failure(FailureResponse),
}
impl Response {
pub fn to_bytes(&self) -> Vec<u8> {
match self {
Response::Announce(r) => r.to_bytes(),
Response::Failure(r) => r.to_bytes(),
Response::Scrape(r) => r.to_bytes(),
}
}
}
// quickcheck generators for the response types, used by the serialization
// equivalence tests below.
#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeer<Ipv4Addr> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
Self {
ip_address: Ipv4Addr::arbitrary(g),
port: u16::arbitrary(g)
}
}
}
#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeer<Ipv6Addr> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
Self {
ip_address: Ipv6Addr::arbitrary(g),
port: u16::arbitrary(g)
}
}
}
#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeerListV4 {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
Self(Vec::arbitrary(g))
}
}
#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeerListV6 {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
Self(Vec::arbitrary(g))
}
}
#[cfg(test)]
impl quickcheck::Arbitrary for ScrapeStatistics {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
Self {
complete: usize::arbitrary(g),
incomplete: usize::arbitrary(g),
// `downloaded` is pinned to zero, so the equivalence tests never
// exercise nonzero downloaded counts.
downloaded: 0,
}
}
}
#[cfg(test)]
impl quickcheck::Arbitrary for AnnounceResponse {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
Self {
announce_interval: usize::arbitrary(g),
complete: usize::arbitrary(g),
incomplete: usize::arbitrary(g),
peers: ResponsePeerListV4::arbitrary(g),
peers6: ResponsePeerListV6::arbitrary(g),
}
}
}
#[cfg(test)]
impl quickcheck::Arbitrary for ScrapeResponse {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
Self {
files: BTreeMap::arbitrary(g),
}
}
}
#[cfg(test)]
impl quickcheck::Arbitrary for FailureResponse {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
Self {
failure_reason: String::arbitrary(g),
}
}
}
// Each hand-written `to_bytes` must agree byte-for-byte with bendy's
// serde-based bencode serialization of the same value.
#[cfg(test)]
mod tests {
use quickcheck_macros::*;
use super::*;
#[quickcheck]
fn test_announce_response_to_bytes(response: AnnounceResponse) -> bool {
let reference = bendy::serde::to_bytes(
&Response::Announce(response.clone())
).unwrap();
response.to_bytes() == reference
}
#[quickcheck]
fn test_scrape_response_to_bytes(response: ScrapeResponse) -> bool {
let reference = bendy::serde::to_bytes(
&Response::Scrape(response.clone())
).unwrap();
let hand_written = response.to_bytes();
let success = hand_written == reference;
// Print both serializations on mismatch to ease debugging.
if !success {
println!("reference: {}", String::from_utf8_lossy(&reference));
println!("hand_written: {}", String::from_utf8_lossy(&hand_written));
}
success
}
#[quickcheck]
fn test_failure_response_to_bytes(response: FailureResponse) -> bool {
let reference = bendy::serde::to_bytes(
&Response::Failure(response.clone())
).unwrap();
response.to_bytes() == reference
}
}

View file

@ -1,73 +0,0 @@
use std::net::{Ipv4Addr, Ipv6Addr};
use serde::Serializer;
use smartstring::{SmartString, LazyCompact};
use super::response::ResponsePeer;
/// Not for serde
///
/// Convert a url-decoded string back into the 20 raw bytes it represents.
/// Each char must lie in U+0000..=U+00FF and is narrowed to one byte —
/// the inverse of the url decoder, which pushes each decoded byte as a
/// char. Errors if any char is out of range or the length is not 20.
pub fn deserialize_20_bytes(value: SmartString<LazyCompact>) -> anyhow::Result<[u8; 20]> {
let mut arr = [0u8; 20];
let mut char_iter = value.chars();
for a in arr.iter_mut(){
if let Some(c) = char_iter.next(){
if c as u32 > 255 {
return Err(anyhow::anyhow!(
"character not in single byte range: {:#?}",
c
));
}
*a = c as u8;
} else {
return Err(anyhow::anyhow!("less than 20 bytes: {:#?}", value));
}
}
// The iterator must be exhausted: exactly 20 chars are allowed.
if char_iter.next().is_some(){
Err(anyhow::anyhow!("more than 20 bytes: {:#?}", value))
} else {
Ok(arr)
}
}
/// Serde helper: emit a fixed 20-byte array (info hash / peer id) as a
/// byte string rather than as a sequence of integers.
#[inline]
pub fn serialize_20_bytes<S>(
bytes: &[u8; 20],
serializer: S
) -> Result<S::Ok, S::Error> where S: Serializer {
serializer.serialize_bytes(bytes)
}
/// Serde helper: serialize IPv4 peers in the compact format — one byte
/// string holding 4 address bytes followed by 2 big-endian port bytes
/// per peer.
pub fn serialize_response_peers_ipv4<S>(
    response_peers: &[ResponsePeer<Ipv4Addr>],
    serializer: S
) -> Result<S::Ok, S::Error> where S: Serializer {
    // 6 bytes per peer: 4 for the address, 2 for the port.
    let mut compact = Vec::with_capacity(response_peers.len() * 6);

    for peer in response_peers.iter() {
        let addr_bytes = u32::from(peer.ip_address).to_be_bytes();
        let port_bytes = peer.port.to_be_bytes();

        compact.extend_from_slice(&addr_bytes);
        compact.extend_from_slice(&port_bytes);
    }

    serializer.serialize_bytes(&compact)
}
/// Serde helper: serialize IPv6 peers in the compact format — one byte
/// string holding 16 address bytes followed by 2 big-endian port bytes
/// per peer.
pub fn serialize_response_peers_ipv6<S>(
    response_peers: &[ResponsePeer<Ipv6Addr>],
    serializer: S
) -> Result<S::Ok, S::Error> where S: Serializer {
    // Each IPv6 peer occupies 18 bytes (16 address + 2 port), matching the
    // `* 18` used in AnnounceResponse::to_bytes. The previous capacity of
    // 6 bytes per peer guaranteed reallocation while extending.
    let mut bytes = Vec::with_capacity(response_peers.len() * 18);
    for peer in response_peers {
        bytes.extend_from_slice(&u128::from(peer.ip_address).to_be_bytes());
        bytes.extend_from_slice(&peer.port.to_be_bytes())
    }
    serializer.serialize_bytes(&bytes)
}

View file

@ -1 +0,0 @@
{"group_id":"announce-response-to-bytes","function_id":null,"value_str":null,"throughput":null,"full_id":"announce-response-to-bytes","directory_name":"announce-response-to-bytes","title":"announce-response-to-bytes"}

View file

@ -1 +0,0 @@
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":6033.211414448978,"upper_bound":6077.812796004471},"point_estimate":6054.625623439862,"standard_error":11.387162302248655},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5978.799232230455,"upper_bound":6005.189535363421},"point_estimate":5992.745967541798,"standard_error":6.185398365563177},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":157.08470879401094,"upper_bound":190.1634482791119},"point_estimate":175.51713287349847,"standard_error":8.3821979113297},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":6052.909623777413,"upper_bound":6106.324900686703},"point_estimate":6078.257114077077,"standard_error":13.648790489926581},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":285.8045348063516,"upper_bound":442.7497149360172},"point_estimate":363.44843558752416,"standard_error":40.16921333191484}}

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
[5184.137608004838,5534.60305616611,6469.177584596171,6819.643032757444]

View file

@ -1 +0,0 @@
{"group_id":"announce-response-to-bytes","function_id":null,"value_str":null,"throughput":null,"full_id":"announce-response-to-bytes","directory_name":"announce-response-to-bytes","title":"announce-response-to-bytes"}

View file

@ -1 +0,0 @@
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":419.88612634751735,"upper_bound":425.38508524104503},"point_estimate":422.5234574675309,"standard_error":1.4143626861263792},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":411.1840254440792,"upper_bound":413.2648196433366},"point_estimate":412.0626077394812,"standard_error":0.539288755707038},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":11.914005345139904,"upper_bound":13.958948147472947},"point_estimate":12.859769956741538,"standard_error":0.5147456144787788},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":413.760828155085,"upper_bound":417.06705955616684},"point_estimate":415.33960474085285,"standard_error":0.844822382993726},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":37.145365634474445,"upper_bound":51.978453081812695},"point_estimate":44.61780477344027,"standard_error":3.780806408623577}}

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
[351.7895769512653,377.8892152978113,447.4882508886006,473.5878892351466]

View file

@ -1 +0,0 @@
{"group_id":"request-from-path","function_id":null,"value_str":null,"throughput":null,"full_id":"request-from-path","directory_name":"request-from-path","title":"request-from-path"}

View file

@ -1 +0,0 @@
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":2144.1723727709154,"upper_bound":2153.112440385677},"point_estimate":2148.498471928281,"standard_error":2.2819363399119816},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":2136.1098212730085,"upper_bound":2142.6570382247664},"point_estimate":2139.175575113935,"standard_error":1.677012549554238},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":44.72773439758049,"upper_bound":52.312043449354604},"point_estimate":48.23289034620888,"standard_error":1.8876949157852096},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":2148.408988674169,"upper_bound":2158.642726752668},"point_estimate":2153.014596603725,"standard_error":2.6164468826051843},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":60.092727504812785,"upper_bound":85.59866236503464},"point_estimate":72.3824856801523,"standard_error":6.570166078942696}}

File diff suppressed because it is too large Load diff

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
[1910.369982082539,2008.6428012110698,2270.7036522204853,2368.976471349016]

View file

@ -1 +0,0 @@
{"group_id":"request-from-path","function_id":null,"value_str":null,"throughput":null,"full_id":"request-from-path","directory_name":"request-from-path","title":"request-from-path"}

View file

@ -1 +0,0 @@
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5824.69387789579,"upper_bound":5873.955231860259},"point_estimate":5848.060278693576,"standard_error":12.63942492388433},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5758.391531674525,"upper_bound":5785.1963683827635},"point_estimate":5773.408873075845,"standard_error":7.037696580466141},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":157.64160863960012,"upper_bound":183.44129621404062},"point_estimate":170.6598530443819,"standard_error":6.5622373240938945},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5832.402565777844,"upper_bound":5887.40667791996},"point_estimate":5858.714713470792,"standard_error":14.059707391986894},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":316.17183558077096,"upper_bound":500.59673643745384},"point_estimate":400.8496885755836,"standard_error":48.62413636310262}}

File diff suppressed because one or more lines are too long

View file

@ -1 +0,0 @@
[4974.032740058845,5318.589771364887,6237.408521514334,6581.965552820377]