mirror of
https://github.com/YGGverse/aquatic.git
synced 2026-03-31 17:55:36 +00:00
Move all crates to new crates dir
This commit is contained in:
parent
3835da22ac
commit
9b032f7e24
128 changed files with 27 additions and 26 deletions
21
crates/combined_binary/Cargo.toml
Normal file
21
crates/combined_binary/Cargo.toml
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
[package]
|
||||
name = "aquatic"
|
||||
description = "High-performance open BitTorrent tracker (UDP, HTTP, WebTorrent)"
|
||||
keywords = ["bittorrent", "torrent", "webtorrent"]
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "aquatic"
|
||||
|
||||
[dependencies]
|
||||
aquatic_common.workspace = true
|
||||
aquatic_http.workspace = true
|
||||
aquatic_udp.workspace = true
|
||||
aquatic_ws.workspace = true
|
||||
mimalloc = { version = "0.1", default-features = false }
|
||||
91
crates/combined_binary/src/main.rs
Normal file
91
crates/combined_binary/src/main.rs
Normal file
|
|
@ -0,0 +1,91 @@
|
|||
use aquatic_common::cli::{print_help, run_app_with_cli_and_config, Options};
|
||||
use aquatic_http::config::Config as HttpConfig;
|
||||
use aquatic_udp::config::Config as UdpConfig;
|
||||
use aquatic_ws::config::Config as WsConfig;
|
||||
|
||||
// Use mimalloc as the process-wide allocator for the combined binary
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

// Title printed at the top of the combined binary's help output
const APP_NAME: &str = "aquatic: BitTorrent tracker";
|
||||
|
||||
fn main() {
|
||||
::std::process::exit(match run() {
|
||||
Ok(()) => 0,
|
||||
Err(None) => {
|
||||
print_help(|| gen_info(), None);
|
||||
|
||||
0
|
||||
}
|
||||
Err(opt_err @ Some(_)) => {
|
||||
print_help(|| gen_info(), opt_err);
|
||||
|
||||
1
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn run() -> Result<(), Option<String>> {
|
||||
let mut arg_iter = ::std::env::args().skip(1);
|
||||
|
||||
let protocol = if let Some(protocol) = arg_iter.next() {
|
||||
protocol
|
||||
} else {
|
||||
return Err(None);
|
||||
};
|
||||
|
||||
let options = match Options::parse_args(arg_iter) {
|
||||
Ok(options) => options,
|
||||
Err(opt_err) => {
|
||||
return Err(opt_err);
|
||||
}
|
||||
};
|
||||
|
||||
match protocol.as_str() {
|
||||
"udp" => run_app_with_cli_and_config::<UdpConfig>(
|
||||
aquatic_udp::APP_NAME,
|
||||
aquatic_udp::APP_VERSION,
|
||||
aquatic_udp::run,
|
||||
Some(options),
|
||||
),
|
||||
"http" => run_app_with_cli_and_config::<HttpConfig>(
|
||||
aquatic_http::APP_NAME,
|
||||
aquatic_http::APP_VERSION,
|
||||
aquatic_http::run,
|
||||
Some(options),
|
||||
),
|
||||
"ws" => run_app_with_cli_and_config::<WsConfig>(
|
||||
aquatic_ws::APP_NAME,
|
||||
aquatic_ws::APP_VERSION,
|
||||
aquatic_ws::run,
|
||||
Some(options),
|
||||
),
|
||||
arg => {
|
||||
let opt_err = if arg == "-h" || arg == "--help" {
|
||||
None
|
||||
} else if arg.chars().next() == Some('-') {
|
||||
Some("First argument must be protocol".to_string())
|
||||
} else {
|
||||
Some("Invalid protocol".to_string())
|
||||
};
|
||||
|
||||
return Err(opt_err);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn gen_info() -> String {
|
||||
let mut info = String::new();
|
||||
|
||||
info.push_str(APP_NAME);
|
||||
|
||||
let app_path = ::std::env::args().next().unwrap();
|
||||
info.push_str(&format!("\n\nUsage: {} PROTOCOL [OPTIONS]", app_path));
|
||||
info.push_str("\n\nAvailable protocols:");
|
||||
info.push_str("\n udp BitTorrent over UDP");
|
||||
info.push_str("\n http BitTorrent over HTTP");
|
||||
info.push_str("\n ws WebTorrent");
|
||||
|
||||
info
|
||||
}
|
||||
41
crates/common/Cargo.toml
Normal file
41
crates/common/Cargo.toml
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
[package]
|
||||
name = "aquatic_common"
|
||||
description = "aquatic BitTorrent tracker common code"
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "aquatic_common"
|
||||
|
||||
[features]
|
||||
rustls = ["dep:rustls", "rustls-pemfile"]
|
||||
|
||||
[dependencies]
|
||||
aquatic_toml_config.workspace = true
|
||||
|
||||
ahash = "0.8"
|
||||
anyhow = "1"
|
||||
arc-swap = "1"
|
||||
duplicate = "1"
|
||||
git-testament = "0.2"
|
||||
hashbrown = "0.14"
|
||||
hex = "0.4"
|
||||
indexmap = "2"
|
||||
libc = "0.2"
|
||||
log = "0.4"
|
||||
privdrop = "0.5"
|
||||
rand = { version = "0.8", features = ["small_rng"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
simple_logger = { version = "4", features = ["stderr"] }
|
||||
toml = "0.5"
|
||||
|
||||
# Optional
|
||||
glommio = { version = "0.8", optional = true }
|
||||
hwloc = { version = "0.5", optional = true }
|
||||
rustls = { version = "0.21", optional = true }
|
||||
rustls-pemfile = { version = "1", optional = true }
|
||||
196
crates/common/src/access_list.rs
Normal file
196
crates/common/src/access_list.rs
Normal file
|
|
@ -0,0 +1,196 @@
|
|||
use std::fs::File;
|
||||
use std::io::{BufRead, BufReader};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
use arc_swap::{ArcSwap, Cache};
|
||||
use hashbrown::HashSet;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Access list mode. Available modes are allow, deny and off.
#[derive(Clone, Copy, Debug, PartialEq, TomlConfig, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AccessListMode {
    /// Only serve torrents with info hash present in file
    Allow,
    /// Do not serve torrents if info hash present in file
    Deny,
    /// Turn off access list functionality
    Off,
}

impl AccessListMode {
    /// Returns true unless access list functionality is disabled (`Off`)
    pub fn is_on(&self) -> bool {
        !matches!(self, Self::Off)
    }
}
|
||||
|
||||
/// Configuration for the optional torrent access list
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct AccessListConfig {
    // Whether to allow, deny or ignore the listed info hashes
    pub mode: AccessListMode,
    /// Path to access list file consisting of newline-separated hex-encoded info hashes.
    ///
    /// If using chroot mode, path must be relative to new root.
    pub path: PathBuf,
}

impl Default for AccessListConfig {
    fn default() -> Self {
        Self {
            // Access list functionality is off by default; the path is only
            // read when a mode other than `Off` is configured
            path: "./access-list.txt".into(),
            mode: AccessListMode::Off,
        }
    }
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct AccessList(HashSet<[u8; 20]>);
|
||||
|
||||
impl AccessList {
|
||||
pub fn insert_from_line(&mut self, line: &str) -> anyhow::Result<()> {
|
||||
self.0.insert(parse_info_hash(line)?);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn create_from_path(path: &PathBuf) -> anyhow::Result<Self> {
|
||||
let file = File::open(path)?;
|
||||
let reader = BufReader::new(file);
|
||||
|
||||
let mut new_list = Self::default();
|
||||
|
||||
for line in reader.lines() {
|
||||
let line = line?;
|
||||
let line = line.trim();
|
||||
|
||||
if line.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
new_list
|
||||
.insert_from_line(&line)
|
||||
.with_context(|| format!("Invalid line in access list: {}", line))?;
|
||||
}
|
||||
|
||||
Ok(new_list)
|
||||
}
|
||||
|
||||
pub fn allows(&self, mode: AccessListMode, info_hash: &[u8; 20]) -> bool {
|
||||
match mode {
|
||||
AccessListMode::Allow => self.0.contains(info_hash),
|
||||
AccessListMode::Deny => !self.0.contains(info_hash),
|
||||
AccessListMode::Off => true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
}
|
||||
|
||||
pub trait AccessListQuery {
|
||||
fn update(&self, config: &AccessListConfig) -> anyhow::Result<()>;
|
||||
fn allows(&self, list_mode: AccessListMode, info_hash_bytes: &[u8; 20]) -> bool;
|
||||
}
|
||||
|
||||
pub type AccessListArcSwap = ArcSwap<AccessList>;
|
||||
pub type AccessListCache = Cache<Arc<AccessListArcSwap>, Arc<AccessList>>;
|
||||
|
||||
impl AccessListQuery for AccessListArcSwap {
|
||||
fn update(&self, config: &AccessListConfig) -> anyhow::Result<()> {
|
||||
self.store(Arc::new(AccessList::create_from_path(&config.path)?));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn allows(&self, mode: AccessListMode, info_hash_bytes: &[u8; 20]) -> bool {
|
||||
match mode {
|
||||
AccessListMode::Allow => self.load().0.contains(info_hash_bytes),
|
||||
AccessListMode::Deny => !self.load().0.contains(info_hash_bytes),
|
||||
AccessListMode::Off => true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_access_list_cache(arc_swap: &Arc<AccessListArcSwap>) -> AccessListCache {
|
||||
Cache::from(Arc::clone(arc_swap))
|
||||
}
|
||||
|
||||
pub fn update_access_list(
|
||||
config: &AccessListConfig,
|
||||
access_list: &Arc<AccessListArcSwap>,
|
||||
) -> anyhow::Result<()> {
|
||||
if config.mode.is_on() {
|
||||
match access_list.update(config) {
|
||||
Ok(()) => {
|
||||
::log::info!("Access list updated")
|
||||
}
|
||||
Err(err) => {
|
||||
::log::error!("Updating access list failed: {:#}", err);
|
||||
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_info_hash(line: &str) -> anyhow::Result<[u8; 20]> {
|
||||
let mut bytes = [0u8; 20];
|
||||
|
||||
hex::decode_to_slice(line, &mut bytes)?;
|
||||
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_info_hash() {
        let f = parse_info_hash;

        // `parse_info_hash` takes `&str` directly, so the original
        // `"...".into()` conversions were no-ops (clippy::useless_conversion)
        // Exactly 40 hex chars: ok
        assert!(f("aaaabbbbccccddddeeeeaaaabbbbccccddddeeee").is_ok());
        // 41 chars: too long
        assert!(f("aaaabbbbccccddddeeeeaaaabbbbccccddddeeeef").is_err());
        // 39 chars: too short
        assert!(f("aaaabbbbccccddddeeeeaaaabbbbccccddddeee").is_err());
        // Non-hex (and non-ASCII) final character
        assert!(f("aaaabbbbccccddddeeeeaaaabbbbccccddddeeeö").is_err());
    }

    #[test]
    fn test_cache_allows() {
        let mut access_list = AccessList::default();

        let a = parse_info_hash("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
        let b = parse_info_hash("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap();
        let c = parse_info_hash("cccccccccccccccccccccccccccccccccccccccc").unwrap();

        // Only a and b are in the list; c is not
        access_list.0.insert(a);
        access_list.0.insert(b);

        let access_list = Arc::new(ArcSwap::new(Arc::new(access_list)));

        let mut access_list_cache = Cache::new(Arc::clone(&access_list));

        assert!(access_list_cache.load().allows(AccessListMode::Allow, &a));
        assert!(access_list_cache.load().allows(AccessListMode::Allow, &b));
        assert!(!access_list_cache.load().allows(AccessListMode::Allow, &c));

        assert!(!access_list_cache.load().allows(AccessListMode::Deny, &a));
        assert!(!access_list_cache.load().allows(AccessListMode::Deny, &b));
        assert!(access_list_cache.load().allows(AccessListMode::Deny, &c));

        // Off mode allows everything regardless of list contents
        assert!(access_list_cache.load().allows(AccessListMode::Off, &a));
        assert!(access_list_cache.load().allows(AccessListMode::Off, &b));
        assert!(access_list_cache.load().allows(AccessListMode::Off, &c));

        // Swapping in an empty list must be visible through the cache
        access_list.store(Arc::new(AccessList::default()));

        assert!(access_list_cache.load().allows(AccessListMode::Deny, &a));
        assert!(access_list_cache.load().allows(AccessListMode::Deny, &b));
    }
}
|
||||
239
crates/common/src/cli.rs
Normal file
239
crates/common/src/cli.rs
Normal file
|
|
@ -0,0 +1,239 @@
|
|||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
use git_testament::{git_testament, CommitKind};
|
||||
use log::LevelFilter;
|
||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||
use simple_logger::SimpleLogger;
|
||||
|
||||
/// Log level. Available values are off, error, warn, info, debug and trace.
#[derive(Debug, Clone, Copy, PartialEq, TomlConfig, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LogLevel {
    Off,
    Error,
    Warn,
    Info,
    Debug,
    Trace,
}

impl Default for LogLevel {
    // Default to warnings and above
    fn default() -> Self {
        Self::Warn
    }
}
|
||||
|
||||
/// Contract for application configuration types used with
/// `run_app_with_cli_and_config`.
pub trait Config: Default + TomlConfig + DeserializeOwned + std::fmt::Debug {
    /// Log level to initialize logging with. The default of `None`
    /// skips logger setup entirely.
    fn get_log_level(&self) -> Option<LogLevel> {
        None
    }
}
|
||||
|
||||
/// Command line options shared by all aquatic binaries
#[derive(Debug, Default)]
pub struct Options {
    config_file: Option<String>,
    print_config: bool,
    print_parsed_config: bool,
    print_version: bool,
}

impl Options {
    /// Parse command line arguments (excluding the binary path itself).
    ///
    /// Returns `Err(None)` when help was requested and `Err(Some(message))`
    /// on invalid input.
    pub fn parse_args<I>(mut arg_iter: I) -> Result<Options, Option<String>>
    where
        I: Iterator<Item = String>,
    {
        let mut options = Options::default();

        // `while let` instead of `loop { if let ... else break }`;
        // `arg_iter.next()` is still callable inside for option values
        while let Some(arg) = arg_iter.next() {
            match arg.as_str() {
                "-c" | "--config-file" => match arg_iter.next() {
                    Some(path) => options.config_file = Some(path),
                    None => return Err(Some("No config file path given".to_string())),
                },
                "-p" | "--print-config" => options.print_config = true,
                "-P" => options.print_parsed_config = true,
                "-v" | "--version" => options.print_version = true,
                "-h" | "--help" => return Err(None),
                // Empty arguments are silently ignored
                "" => (),
                _ => return Err(Some("Unrecognized argument".to_string())),
            }
        }

        Ok(options)
    }
}
|
||||
|
||||
pub fn run_app_with_cli_and_config<T>(
|
||||
app_title: &str,
|
||||
crate_version: &str,
|
||||
// Function that takes config file and runs application
|
||||
app_fn: fn(T) -> anyhow::Result<()>,
|
||||
opts: Option<Options>,
|
||||
) where
|
||||
T: Config,
|
||||
{
|
||||
::std::process::exit(match run_inner(app_title, crate_version, app_fn, opts) {
|
||||
Ok(()) => 0,
|
||||
Err(err) => {
|
||||
eprintln!("Error: {:#}", err);
|
||||
|
||||
1
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Shared implementation behind `run_app_with_cli_and_config`.
///
/// Parses CLI options unless pre-parsed ones are supplied, then either
/// prints version/config information or loads the config file, initializes
/// logging and hands the config to `app_fn`.
fn run_inner<T>(
    app_title: &str,
    crate_version: &str,
    // Function that takes config file and runs application
    app_fn: fn(T) -> anyhow::Result<()>,
    // Possibly preparsed options
    options: Option<Options>,
) -> anyhow::Result<()>
where
    T: Config,
{
    let options = if let Some(options) = options {
        options
    } else {
        let mut arg_iter = ::std::env::args();

        // First argument is the binary path; it is only used in the
        // usage string below
        let app_path = arg_iter.next().unwrap();

        match Options::parse_args(arg_iter) {
            Ok(options) => options,
            Err(opt_err) => {
                let gen_info = || format!("{}\n\nUsage: {} [OPTIONS]", app_title, app_path);

                print_help(gen_info, opt_err);

                // Printing help/usage is not treated as an error here;
                // the caller decides the exit code
                return Ok(());
            }
        }
    };

    if options.print_version {
        let commit_info = get_commit_info();

        println!("{}{}", crate_version, commit_info);

        Ok(())
    } else if options.print_config {
        // Print the default configuration rendered as TOML
        print!("{}", default_config_as_toml::<T>());

        Ok(())
    } else {
        // Fall back to the default config when no file was given
        let config = if let Some(path) = options.config_file {
            config_from_toml_file(path)?
        } else {
            T::default()
        };

        // Only set up logging when the config type exposes a log level
        if let Some(log_level) = config.get_log_level() {
            start_logger(log_level)?;
        }

        if options.print_parsed_config {
            println!("Running with configuration: {:#?}", config);
        }

        app_fn(config)
    }
}
|
||||
|
||||
/// Print usage information produced by `info_generator`, followed by the
/// standard option listing and, if given, a trailing error message.
pub fn print_help<F>(info_generator: F, opt_error: Option<String>)
where
    F: FnOnce() -> String,
{
    println!("{}", info_generator());

    println!("\nOptions:");

    for option_line in [
        " -c, --config-file Load config from this path",
        " -h, --help Print this help message",
        " -p, --print-config Print default config",
        " -P Print parsed config",
        " -v, --version Print version information",
    ] {
        println!("{}", option_line);
    }

    if let Some(error) = opt_error {
        println!("\nError: {}.", error);
    }
}
|
||||
|
||||
fn config_from_toml_file<T>(path: String) -> anyhow::Result<T>
|
||||
where
|
||||
T: DeserializeOwned,
|
||||
{
|
||||
let mut file = File::open(path.clone())
|
||||
.with_context(|| format!("Couldn't open config file {}", path.clone()))?;
|
||||
|
||||
let mut data = String::new();
|
||||
|
||||
file.read_to_string(&mut data)
|
||||
.with_context(|| format!("Couldn't read config file {}", path.clone()))?;
|
||||
|
||||
toml::from_str(&data).with_context(|| format!("Couldn't parse config file {}", path.clone()))
|
||||
}
|
||||
|
||||
fn default_config_as_toml<T>() -> String
|
||||
where
|
||||
T: Default + TomlConfig,
|
||||
{
|
||||
<T as TomlConfig>::default_to_string()
|
||||
}
|
||||
|
||||
fn start_logger(log_level: LogLevel) -> ::anyhow::Result<()> {
|
||||
let level_filter = match log_level {
|
||||
LogLevel::Off => LevelFilter::Off,
|
||||
LogLevel::Error => LevelFilter::Error,
|
||||
LogLevel::Warn => LevelFilter::Warn,
|
||||
LogLevel::Info => LevelFilter::Info,
|
||||
LogLevel::Debug => LevelFilter::Debug,
|
||||
LogLevel::Trace => LevelFilter::Trace,
|
||||
};
|
||||
|
||||
SimpleLogger::new()
|
||||
.with_level(level_filter)
|
||||
.with_utc_timestamps()
|
||||
.init()
|
||||
.context("Couldn't initialize logger")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_commit_info() -> String {
|
||||
git_testament!(TESTAMENT);
|
||||
|
||||
match TESTAMENT.commit {
|
||||
CommitKind::NoTags(hash, date) => {
|
||||
format!(" ({} - {})", first_8_chars(hash), date)
|
||||
}
|
||||
CommitKind::FromTag(_tag, hash, date, _tag_distance) => {
|
||||
format!(" ({} - {})", first_8_chars(hash), date)
|
||||
}
|
||||
_ => String::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the first eight characters of `input` (fewer if it is shorter).
/// Operates on chars, so multi-byte input is truncated safely.
fn first_8_chars(input: &str) -> String {
    let mut truncated = String::new();

    for c in input.chars().take(8) {
        truncated.push(c);
    }

    truncated
}
|
||||
415
crates/common/src/cpu_pinning.rs
Normal file
415
crates/common/src/cpu_pinning.rs
Normal file
|
|
@ -0,0 +1,415 @@
|
|||
//! Experimental CPU pinning
|
||||
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Direction in which worker threads are assigned to CPU cores
#[derive(Clone, Copy, Debug, PartialEq, TomlConfig, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum CpuPinningDirection {
    /// Assign workers starting from the lowest core index
    Ascending,
    /// Assign workers starting from the highest core index
    Descending,
}

impl Default for CpuPinningDirection {
    fn default() -> Self {
        Self::Ascending
    }
}
|
||||
|
||||
/// How logical CPUs (hyperthreads) are assumed to map to physical cores.
///
/// The concrete interpretation of each variant is applied in
/// `glommio::get_worker_cpu_set`.
#[cfg(feature = "glommio")]
#[derive(Clone, Copy, Debug, PartialEq, TomlConfig, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum HyperThreadMapping {
    /// Use the core mapping reported by the operating system
    System,
    /// Assume sibling hyperthreads have subsequent CPU indices (0-1, 2-3, ...)
    Subsequent,
    /// Assume sibling indices are split halfway (i pairs with i + N/2)
    Split,
}

#[cfg(feature = "glommio")]
impl Default for HyperThreadMapping {
    fn default() -> Self {
        Self::System
    }
}
|
||||
|
||||
/// Read access to CPU pinning settings, independent of the concrete
/// configuration struct holding them.
pub trait CpuPinningConfig {
    /// Whether CPU pinning is enabled at all
    fn active(&self) -> bool;
    fn direction(&self) -> CpuPinningDirection;
    #[cfg(feature = "glommio")]
    fn hyperthread(&self) -> HyperThreadMapping;
    /// Number of cores to skip before assigning workers
    fn core_offset(&self) -> usize;
}
|
||||
|
||||
// Do these shenanigans for compatibility with aquatic_toml_config
//
// `duplicate_item` stamps out two modules (`asc` and `desc`) containing
// structurally identical config structs that differ only in their default
// pinning direction.
#[duplicate::duplicate_item(
    mod_name struct_name cpu_pinning_direction;
    [asc] [CpuPinningConfigAsc] [CpuPinningDirection::Ascending];
    [desc] [CpuPinningConfigDesc] [CpuPinningDirection::Descending];
)]
pub mod mod_name {
    use super::*;

    /// Experimental cpu pinning
    #[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
    pub struct struct_name {
        pub active: bool,
        pub direction: CpuPinningDirection,
        #[cfg(feature = "glommio")]
        pub hyperthread: HyperThreadMapping,
        pub core_offset: usize,
    }

    impl Default for struct_name {
        fn default() -> Self {
            Self {
                // Pinning is opt-in
                active: false,
                direction: cpu_pinning_direction,
                #[cfg(feature = "glommio")]
                hyperthread: Default::default(),
                core_offset: 0,
            }
        }
    }
    // Forward each field through the shared `CpuPinningConfig` trait
    impl CpuPinningConfig for struct_name {
        fn active(&self) -> bool {
            self.active
        }
        fn direction(&self) -> CpuPinningDirection {
            self.direction
        }
        #[cfg(feature = "glommio")]
        fn hyperthread(&self) -> HyperThreadMapping {
            self.hyperthread
        }
        fn core_offset(&self) -> usize {
            self.core_offset
        }
    }
}
|
||||
|
||||
/// Identifies a worker thread for the purpose of core assignment
#[derive(Clone, Copy, Debug)]
pub enum WorkerIndex {
    SocketWorker(usize),
    SwarmWorker(usize),
    /// The single utility worker, placed after all socket and swarm workers
    Util,
}

impl WorkerIndex {
    /// Map this worker to a CPU core index.
    ///
    /// Workers are laid out as [socket workers | swarm workers | util],
    /// shifted by `core_offset` and clamped to the highest available core;
    /// with descending pinning the index is mirrored from the top.
    pub fn get_core_index<C: CpuPinningConfig>(
        &self,
        config: &C,
        socket_workers: usize,
        swarm_workers: usize,
        num_cores: usize,
    ) -> usize {
        let ascending_index = match self {
            Self::SocketWorker(index) => config.core_offset() + index,
            Self::SwarmWorker(index) => config.core_offset() + socket_workers + index,
            Self::Util => config.core_offset() + socket_workers + swarm_workers,
        };

        let max_core_index = num_cores - 1;

        // Clamp so that excess workers pile up on the last core instead of
        // indexing out of range
        let ascending_index = ascending_index.min(max_core_index);

        match config.direction() {
            CpuPinningDirection::Ascending => ascending_index,
            CpuPinningDirection::Descending => max_core_index - ascending_index,
        }
    }
}
|
||||
|
||||
#[cfg(feature = "glommio")]
pub mod glommio {
    use ::glommio::{CpuSet, Placement};

    use super::*;

    // Set of all online logical CPUs
    fn get_cpu_set() -> anyhow::Result<CpuSet> {
        CpuSet::online().map_err(|err| anyhow::anyhow!("Couldn't get CPU set: {:#}", err))
    }

    // Number of physical cores, derived from the highest core index seen
    // in the online CPU set
    fn get_num_cpu_cores() -> anyhow::Result<usize> {
        get_cpu_set()?
            .iter()
            .map(|l| l.core)
            .max()
            .map(|index| index + 1)
            .ok_or(anyhow::anyhow!("CpuSet is empty"))
    }

    // Sorted, comma-separated list of the logical CPU indices in `cpu_set`,
    // used for logging
    fn logical_cpus_string(cpu_set: &CpuSet) -> String {
        let mut logical_cpus = cpu_set.iter().map(|l| l.cpu).collect::<Vec<usize>>();

        logical_cpus.sort_unstable();

        logical_cpus
            .into_iter()
            .map(|cpu| cpu.to_string())
            .collect::<Vec<String>>()
            .join(", ")
    }

    /// Compute the CPU set a given worker should be pinned to, according
    /// to the configured direction and hyperthread mapping.
    fn get_worker_cpu_set<C: CpuPinningConfig>(
        config: &C,
        socket_workers: usize,
        swarm_workers: usize,
        worker_index: WorkerIndex,
    ) -> anyhow::Result<CpuSet> {
        let num_cpu_cores = get_num_cpu_cores()?;

        let core_index =
            worker_index.get_core_index(config, socket_workers, swarm_workers, num_cpu_cores);

        // In Split/Subsequent modes each worker consumes a hyperthread
        // pair, so only half the logical CPUs are addressable
        let too_many_workers = match (&config.hyperthread(), &config.direction()) {
            (
                HyperThreadMapping::Split | HyperThreadMapping::Subsequent,
                CpuPinningDirection::Ascending,
            ) => core_index >= num_cpu_cores / 2,
            (
                HyperThreadMapping::Split | HyperThreadMapping::Subsequent,
                CpuPinningDirection::Descending,
            ) => core_index < num_cpu_cores / 2,
            (_, _) => false,
        };

        if too_many_workers {
            return Err(anyhow::anyhow!("CPU pinning: total number of workers (including the single utility worker) can not exceed number of virtual CPUs / 2 - core_offset in this hyperthread mapping mode"));
        }

        let cpu_set = match config.hyperthread() {
            // Trust the OS-reported core field
            HyperThreadMapping::System => get_cpu_set()?.filter(|l| l.core == core_index),
            // Sibling assumed at index +/- num_cpu_cores / 2
            HyperThreadMapping::Split => match config.direction() {
                CpuPinningDirection::Ascending => get_cpu_set()?
                    .filter(|l| l.cpu == core_index || l.cpu == core_index + num_cpu_cores / 2),
                CpuPinningDirection::Descending => get_cpu_set()?
                    .filter(|l| l.cpu == core_index || l.cpu == core_index - num_cpu_cores / 2),
            },
            // Siblings assumed at adjacent indices (2i, 2i + 1)
            HyperThreadMapping::Subsequent => {
                let cpu_index_offset = match config.direction() {
                    // 0 -> 0 and 1
                    // 1 -> 2 and 3
                    // 2 -> 4 and 5
                    CpuPinningDirection::Ascending => core_index * 2,
                    // 15 -> 14 and 15
                    // 14 -> 12 and 13
                    // 13 -> 10 and 11
                    CpuPinningDirection::Descending => {
                        num_cpu_cores - 2 * (num_cpu_cores - core_index)
                    }
                };

                get_cpu_set()?
                    .filter(|l| l.cpu == cpu_index_offset || l.cpu == cpu_index_offset + 1)
            }
        };

        if cpu_set.is_empty() {
            Err(anyhow::anyhow!(
                "CPU pinning: produced empty CPU set for {:?}. Try decreasing number of workers",
                worker_index
            ))
        } else {
            ::log::info!(
                "Logical CPUs for {:?}: {}",
                worker_index,
                logical_cpus_string(&cpu_set)
            );

            Ok(cpu_set)
        }
    }

    /// Glommio executor placement for a worker: fenced to its CPU set when
    /// pinning is active, otherwise unbound.
    pub fn get_worker_placement<C: CpuPinningConfig>(
        config: &C,
        socket_workers: usize,
        swarm_workers: usize,
        worker_index: WorkerIndex,
    ) -> anyhow::Result<Placement> {
        if config.active() {
            let cpu_set = get_worker_cpu_set(config, socket_workers, swarm_workers, worker_index)?;

            Ok(Placement::Fenced(cpu_set))
        } else {
            Ok(Placement::Unbound)
        }
    }

    /// Pin the calling thread (expected to be the utility worker) to the
    /// utility worker's CPU set via pthread affinity.
    pub fn set_affinity_for_util_worker<C: CpuPinningConfig>(
        config: &C,
        socket_workers: usize,
        swarm_workers: usize,
    ) -> anyhow::Result<()> {
        let worker_cpu_set =
            get_worker_cpu_set(config, socket_workers, swarm_workers, WorkerIndex::Util)?;

        unsafe {
            // Zeroed cpu_set_t is the empty set; CPU_SET adds each CPU
            let mut set: libc::cpu_set_t = ::std::mem::zeroed();

            for cpu_location in worker_cpu_set {
                libc::CPU_SET(cpu_location.cpu, &mut set);
            }

            let status = libc::pthread_setaffinity_np(
                libc::pthread_self(),
                ::std::mem::size_of::<libc::cpu_set_t>(),
                &set,
            );

            // pthread_setaffinity_np returns the error number directly
            // rather than setting errno
            if status != 0 {
                return Err(anyhow::Error::new(::std::io::Error::from_raw_os_error(
                    status,
                )));
            }
        }

        Ok(())
    }
}
|
||||
|
||||
/// Pin current thread to a suitable core
///
/// Requires hwloc (`apt-get install libhwloc-dev`)
#[cfg(feature = "hwloc")]
pub fn pin_current_if_configured_to<C: CpuPinningConfig>(
    config: &C,
    socket_workers: usize,
    swarm_workers: usize,
    worker_index: WorkerIndex,
) {
    use hwloc::{CpuSet, ObjectType, Topology, CPUBIND_THREAD};

    if config.active() {
        let mut topology = Topology::new();

        // One CPU set per physical core, in topology order
        let core_cpu_sets: Vec<CpuSet> = topology
            .objects_with_type(&ObjectType::Core)
            .expect("hwloc: list cores")
            .into_iter()
            .map(|core| core.allowed_cpuset().expect("hwloc: get core cpu set"))
            .collect();

        let num_cores = core_cpu_sets.len();

        let core_index =
            worker_index.get_core_index(config, socket_workers, swarm_workers, num_cores);

        let cpu_set = core_cpu_sets
            .get(core_index)
            .expect(&format!("get cpu set for core {}", core_index))
            .to_owned();

        // CPUBIND_THREAD binds only the calling thread, not the whole process
        topology
            .set_cpubind(cpu_set, CPUBIND_THREAD)
            .expect(&format!("bind thread to core {}", core_index));

        ::log::info!(
            "Pinned worker {:?} to cpu core {}",
            worker_index,
            core_index
        );
    }
}
|
||||
|
||||
/// Tell Linux that incoming messages should be handled by the socket worker
/// with the same index as the CPU core receiving the interrupt.
///
/// Requires that sockets are actually bound in order, so waiting has to be done
/// in socket workers.
///
/// It might make sense to first enable RSS or RPS (if hardware doesn't support
/// RSS) and enable sending interrupts to all CPUs that have socket workers
/// running on them. Possibly, CPU 0 should be excluded.
///
/// More Information:
/// - https://talawah.io/blog/extreme-http-performance-tuning-one-point-two-million/
/// - https://www.kernel.org/doc/Documentation/networking/scaling.txt
/// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/performance_tuning_guide/network-rps
#[cfg(target_os = "linux")]
pub fn socket_attach_cbpf<S: ::std::os::unix::prelude::AsRawFd>(
    socket: &S,
    _num_sockets: usize,
) -> ::std::io::Result<()> {
    use std::mem::size_of;
    use std::os::raw::c_void;

    use libc::{setsockopt, sock_filter, sock_fprog, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF};

    // Good BPF documentation: https://man.openbsd.org/bpf.4

    // Values of constants were copied from the following Linux source files:
    // - include/uapi/linux/bpf_common.h
    // - include/uapi/linux/filter.h

    // Instruction
    const BPF_LD: u16 = 0x00; // Load into A
    // const BPF_LDX: u16 = 0x01; // Load into X
    // const BPF_ALU: u16 = 0x04; // Load into X
    const BPF_RET: u16 = 0x06; // Return value
    // const BPF_MOD: u16 = 0x90; // Run modulo on A

    // Size
    const BPF_W: u16 = 0x00; // 32-bit width

    // Source
    // const BPF_IMM: u16 = 0x00; // Use constant (k)
    const BPF_ABS: u16 = 0x20;

    // Registers
    // const BPF_K: u16 = 0x00;
    const BPF_A: u16 = 0x10;

    // k
    const SKF_AD_OFF: i32 = -0x1000; // Activate extensions
    const SKF_AD_CPU: i32 = 36; // Extension for getting CPU

    // Return index of socket that should receive packet
    let mut filter = [
        // Store index of CPU receiving packet in register A
        sock_filter {
            code: BPF_LD | BPF_W | BPF_ABS,
            jt: 0,
            jf: 0,
            // Reinterpret the negative extension offset as u32 bit-for-bit
            k: u32::from_ne_bytes((SKF_AD_OFF + SKF_AD_CPU).to_ne_bytes()),
        },
        /* Disabled, because it doesn't make a lot of sense
        // Run A = A % socket_workers
        sock_filter {
            code: BPF_ALU | BPF_MOD,
            jt: 0,
            jf: 0,
            k: num_sockets as u32,
        },
        */
        // Return A
        sock_filter {
            code: BPF_RET | BPF_A,
            jt: 0,
            jf: 0,
            k: 0,
        },
    ];

    let program = sock_fprog {
        filter: filter.as_mut_ptr(),
        len: filter.len() as u16,
    };

    let program_ptr: *const sock_fprog = &program;

    // NOTE(review): `filter` and `program` outlive the setsockopt call,
    // which copies the program into the kernel — presumably sound; confirm
    // against setsockopt(SO_ATTACH_REUSEPORT_CBPF) semantics
    unsafe {
        let result = setsockopt(
            socket.as_raw_fd(),
            SOL_SOCKET,
            SO_ATTACH_REUSEPORT_CBPF,
            program_ptr as *const c_void,
            size_of::<sock_fprog>() as u32,
        );

        if result != 0 {
            Err(::std::io::Error::last_os_error())
        } else {
            Ok(())
        }
    }
}
|
||||
279
crates/common/src/lib.rs
Normal file
279
crates/common/src/lib.rs
Normal file
|
|
@ -0,0 +1,279 @@
|
|||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use ahash::RandomState;
|
||||
use rand::Rng;
|
||||
|
||||
pub mod access_list;
|
||||
pub mod cli;
|
||||
pub mod cpu_pinning;
|
||||
pub mod privileges;
|
||||
#[cfg(feature = "rustls")]
|
||||
pub mod rustls_config;
|
||||
|
||||
/// IndexMap using AHash hasher (instead of the default SipHash hasher)
pub type IndexMap<K, V> = indexmap::IndexMap<K, V, RandomState>;
|
||||
|
||||
/// Peer, connection or similar valid until this instant
#[derive(Debug, Clone, Copy)]
pub struct ValidUntil(SecondsSinceServerStart);

impl ValidUntil {
    /// Deadline at `offset_seconds` past the current server clock reading.
    #[inline]
    pub fn new(start_instant: ServerStartInstant, offset_seconds: u32) -> Self {
        let now = start_instant.seconds_elapsed();

        Self::new_with_now(now, offset_seconds)
    }

    /// Deadline at `offset_seconds` past an already-sampled clock reading.
    pub fn new_with_now(now: SecondsSinceServerStart, offset_seconds: u32) -> Self {
        Self(SecondsSinceServerStart(now.0 + offset_seconds))
    }

    /// True while the deadline lies strictly in the future.
    pub fn valid(&self, now: SecondsSinceServerStart) -> bool {
        let ValidUntil(SecondsSinceServerStart(deadline)) = *self;

        deadline > now.0
    }
}

/// Monotonic clock anchored at server startup
#[derive(Debug, Clone, Copy)]
pub struct ServerStartInstant(Instant);

impl ServerStartInstant {
    pub fn new() -> Self {
        Self(Instant::now())
    }

    /// Whole seconds elapsed since startup.
    ///
    /// Panics once the elapsed time no longer fits in a u32 (about 136 years).
    pub fn seconds_elapsed(&self) -> SecondsSinceServerStart {
        let seconds: u32 = self
            .0
            .elapsed()
            .as_secs()
            .try_into()
            .expect("server ran for more seconds than what fits in a u32");

        SecondsSinceServerStart(seconds)
    }
}

/// Whole seconds elapsed since the corresponding `ServerStartInstant`
#[derive(Debug, Clone, Copy)]
pub struct SecondsSinceServerStart(u32);
|
||||
|
||||
pub struct PanicSentinelWatcher(Arc<AtomicBool>);
|
||||
|
||||
impl PanicSentinelWatcher {
|
||||
pub fn create_with_sentinel() -> (Self, PanicSentinel) {
|
||||
let triggered = Arc::new(AtomicBool::new(false));
|
||||
let sentinel = PanicSentinel(triggered.clone());
|
||||
|
||||
(Self(triggered), sentinel)
|
||||
}
|
||||
|
||||
pub fn panic_was_triggered(&self) -> bool {
|
||||
self.0.load(Ordering::SeqCst)
|
||||
}
|
||||
}
|
||||
|
||||
/// Raises SIGTERM when dropped
|
||||
///
|
||||
/// Pass to threads to have panics in them cause whole program to exit.
|
||||
#[derive(Clone)]
|
||||
pub struct PanicSentinel(Arc<AtomicBool>);
|
||||
|
||||
impl Drop for PanicSentinel {
|
||||
fn drop(&mut self) {
|
||||
if ::std::thread::panicking() {
|
||||
let already_triggered = self.0.fetch_or(true, Ordering::SeqCst);
|
||||
|
||||
if !already_triggered {
|
||||
if unsafe { libc::raise(15) } == -1 {
|
||||
panic!(
|
||||
"Could not raise SIGTERM: {:#}",
|
||||
::std::io::Error::last_os_error()
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// SocketAddr that is not an IPv6-mapped IPv4 address
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct CanonicalSocketAddr(SocketAddr);

impl CanonicalSocketAddr {
    /// Wrap an address, unwrapping IPv6-mapped IPv4 (`::ffff:a.b.c.d`) into
    /// plain IPv4 so that the two representations compare and hash equal.
    pub fn new(addr: SocketAddr) -> Self {
        let canonical = match addr {
            SocketAddr::V6(v6) => match v6.ip().octets() {
                // IPv4-mapped address (conversion is in std but nightly-only)
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, a, b, c, d] => {
                    SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(a, b, c, d), v6.port()))
                }
                _ => SocketAddr::V6(v6),
            },
            v4 => v4,
        };

        Self(canonical)
    }

    /// The address, with IPv4 re-expressed as an IPv6-mapped address.
    pub fn get_ipv6_mapped(self) -> SocketAddr {
        if let SocketAddr::V4(v4) = self.0 {
            SocketAddr::V6(SocketAddrV6::new(v4.ip().to_ipv6_mapped(), v4.port(), 0, 0))
        } else {
            self.0
        }
    }

    /// The canonical address.
    pub fn get(self) -> SocketAddr {
        self.0
    }

    /// The canonical address if it is IPv4, otherwise None.
    pub fn get_ipv4(self) -> Option<SocketAddr> {
        self.is_ipv4().then_some(self.0)
    }

    pub fn is_ipv4(&self) -> bool {
        self.0.is_ipv4()
    }
}
|
||||
|
||||
/// Extract response peers
///
/// If there are more peers in map than `max_num_peers_to_take`, do a random
/// selection of peers from first and second halves of map in order to avoid
/// returning too homogeneous peers.
///
/// `sender_peer_map_key` identifies the announcing peer itself, which is
/// always excluded from the result. `peer_conversion_function` maps stored
/// peer values to the response representation.
#[inline]
pub fn extract_response_peers<K, V, R, F>(
    rng: &mut impl Rng,
    peer_map: &IndexMap<K, V>,
    max_num_peers_to_take: usize,
    sender_peer_map_key: K,
    peer_conversion_function: F,
) -> Vec<R>
where
    K: Eq + ::std::hash::Hash,
    F: Fn(&V) -> R,
{
    if peer_map.len() <= max_num_peers_to_take + 1 {
        // This branch: number of peers in map (minus sender peer) is less than
        // or equal to number of peers to take, so return all except sender
        // peer.
        let mut peers = Vec::with_capacity(peer_map.len());

        peers.extend(peer_map.iter().filter_map(|(k, v)| {
            (*k != sender_peer_map_key).then_some(peer_conversion_function(v))
        }));

        // Handle the case when sender peer is not in peer list. Typically,
        // this function will not be called when this is the case.
        if peers.len() > max_num_peers_to_take {
            peers.pop();
        }

        peers
    } else {
        // Note: if this branch is taken, the peer map contains at least two
        // more peers than max_num_peers_to_take

        let middle_index = peer_map.len() / 2;
        // Add one to take two extra peers in case sender peer is among
        // selected peers and will need to be filtered out
        let num_to_take_per_half = (max_num_peers_to_take / 2) + 1;

        // Random start offsets for a contiguous run in each half. Since
        // len >= max_num_peers_to_take + 2, middle_index >= num_to_take_per_half,
        // so the subtractions below cannot underflow; the usize::max guards
        // additionally keep the gen_range spans non-empty.
        let offset_half_one = {
            let from = 0;
            let to = usize::max(1, middle_index - num_to_take_per_half);

            rng.gen_range(from..to)
        };
        let offset_half_two = {
            let from = middle_index;
            let to = usize::max(middle_index + 1, peer_map.len() - num_to_take_per_half);

            rng.gen_range(from..to)
        };

        let end_half_one = offset_half_one + num_to_take_per_half;
        let end_half_two = offset_half_two + num_to_take_per_half;

        let mut peers = Vec::with_capacity(max_num_peers_to_take + 2);

        // get_range returns None only if the range is out of bounds, in which
        // case that half simply contributes nothing
        if let Some(slice) = peer_map.get_range(offset_half_one..end_half_one) {
            peers.extend(slice.iter().filter_map(|(k, v)| {
                (*k != sender_peer_map_key).then_some(peer_conversion_function(v))
            }));
        }
        if let Some(slice) = peer_map.get_range(offset_half_two..end_half_two) {
            peers.extend(slice.iter().filter_map(|(k, v)| {
                (*k != sender_peer_map_key).then_some(peer_conversion_function(v))
            }));
        }

        // Up to two extra peers may have been taken; trim back down
        while peers.len() > max_num_peers_to_take {
            peers.pop();
        }

        peers
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use ahash::HashSet;

    use rand::{rngs::SmallRng, SeedableRng};

    use super::*;

    // Exhaustively sweep small combinations of map size, requested count and
    // sender key, checking invariants rather than exact (random) selections
    #[test]
    fn test_extract_response_peers() {
        let mut rng = SmallRng::from_entropy();

        for num_peers_in_map in 0..50 {
            for max_num_peers_to_take in 0..50 {
                for sender_peer_map_key in 0..50 {
                    test_extract_response_peers_helper(
                        &mut rng,
                        num_peers_in_map,
                        max_num_peers_to_take,
                        sender_peer_map_key,
                    );
                }
            }
        }
    }

    // Invariants checked: exact count when the map is large enough, never
    // more than requested, sender excluded, no duplicates
    fn test_extract_response_peers_helper(
        rng: &mut SmallRng,
        num_peers_in_map: usize,
        max_num_peers_to_take: usize,
        sender_peer_map_key: usize,
    ) {
        let peer_map = IndexMap::from_iter((0..num_peers_in_map).map(|i| (i, i)));

        let response_peers = extract_response_peers(
            rng,
            &peer_map,
            max_num_peers_to_take,
            sender_peer_map_key,
            |p| *p,
        );

        if num_peers_in_map > max_num_peers_to_take + 1 {
            assert_eq!(response_peers.len(), max_num_peers_to_take);
        } else {
            assert!(response_peers.len() <= max_num_peers_to_take);
        }

        assert!(!response_peers.contains(&sender_peer_map_key));

        // Uniqueness: converting to a set must not shrink the list
        assert_eq!(
            response_peers.len(),
            HashSet::from_iter(response_peers.iter().copied()).len()
        );
    }
}
|
||||
64
crates/common/src/privileges.rs
Normal file
64
crates/common/src/privileges.rs
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
use std::{
|
||||
path::PathBuf,
|
||||
sync::{Arc, Barrier},
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use privdrop::PrivDrop;
|
||||
use serde::Deserialize;
|
||||
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
|
||||
// Settings for optionally dropping root privileges once sockets are bound
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct PrivilegeConfig {
    /// Chroot and switch group and user after binding to sockets
    pub drop_privileges: bool,
    /// Chroot to this path
    pub chroot_path: PathBuf,
    /// Group to switch to after chrooting
    pub group: String,
    /// User to switch to after chrooting
    pub user: String,
}

// Defaults: privilege dropping disabled; conventional unprivileged
// user/group names and current directory as chroot target
impl Default for PrivilegeConfig {
    fn default() -> Self {
        Self {
            drop_privileges: false,
            chroot_path: ".".into(),
            user: "nobody".to_string(),
            group: "nogroup".to_string(),
        }
    }
}
|
||||
|
||||
// Shared handle used by socket workers to coordinate a single privilege
// drop after all of them have bound their sockets
#[derive(Clone)]
pub struct PrivilegeDropper {
    // Sized to the number of sockets so the drop happens only once every
    // worker has checked in
    barrier: Arc<Barrier>,
    config: Arc<PrivilegeConfig>,
}

impl PrivilegeDropper {
    // `num_sockets` must equal the number of clones on which
    // after_socket_creation() will eventually be called, or the barrier
    // never releases
    pub fn new(config: PrivilegeConfig, num_sockets: usize) -> Self {
        Self {
            barrier: Arc::new(Barrier::new(num_sockets)),
            config: Arc::new(config),
        }
    }

    // Wait for all socket workers, then have the barrier leader perform the
    // chroot + setgid + setuid exactly once. No-op when drop_privileges is
    // disabled.
    pub fn after_socket_creation(self) -> anyhow::Result<()> {
        if self.config.drop_privileges {
            if self.barrier.wait().is_leader() {
                PrivDrop::default()
                    .chroot(self.config.chroot_path.clone())
                    .group(self.config.group.clone())
                    .user(self.config.user.clone())
                    .apply()
                    .with_context(|| "couldn't drop privileges after socket creation")?;
            }
        }

        Ok(())
    }
}
|
||||
48
crates/common/src/rustls_config.rs
Normal file
48
crates/common/src/rustls_config.rs
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
use std::{fs::File, io::BufReader, path::Path};
|
||||
|
||||
use anyhow::Context;
|
||||
|
||||
pub type RustlsConfig = rustls::ServerConfig;
|
||||
|
||||
pub fn create_rustls_config(
|
||||
tls_certificate_path: &Path,
|
||||
tls_private_key_path: &Path,
|
||||
) -> anyhow::Result<RustlsConfig> {
|
||||
let certs = {
|
||||
let f = File::open(tls_certificate_path).with_context(|| {
|
||||
format!(
|
||||
"open tls certificate file at {}",
|
||||
tls_certificate_path.to_string_lossy()
|
||||
)
|
||||
})?;
|
||||
let mut f = BufReader::new(f);
|
||||
|
||||
rustls_pemfile::certs(&mut f)?
|
||||
.into_iter()
|
||||
.map(|bytes| rustls::Certificate(bytes))
|
||||
.collect()
|
||||
};
|
||||
|
||||
let private_key = {
|
||||
let f = File::open(tls_private_key_path).with_context(|| {
|
||||
format!(
|
||||
"open tls private key file at {}",
|
||||
tls_private_key_path.to_string_lossy()
|
||||
)
|
||||
})?;
|
||||
let mut f = BufReader::new(f);
|
||||
|
||||
rustls_pemfile::pkcs8_private_keys(&mut f)?
|
||||
.first()
|
||||
.map(|bytes| rustls::PrivateKey(bytes.clone()))
|
||||
.ok_or(anyhow::anyhow!("No private keys in file"))?
|
||||
};
|
||||
|
||||
let tls_config = rustls::ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(certs, private_key)
|
||||
.with_context(|| "create rustls config")?;
|
||||
|
||||
Ok(tls_config)
|
||||
}
|
||||
54
crates/http/Cargo.toml
Normal file
54
crates/http/Cargo.toml
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
[package]
name = "aquatic_http"
description = "High-performance open BitTorrent tracker (HTTP over TLS)"
keywords = ["http", "server", "peer-to-peer", "torrent", "bittorrent"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
rust-version.workspace = true

[lib]
name = "aquatic_http"

[[bin]]
name = "aquatic_http"

[features]
# Prometheus endpoint is on by default; it implies metrics collection
default = ["prometheus"]
prometheus = ["metrics", "metrics-exporter-prometheus"]
# Bare metrics collection without the prometheus exporter
metrics = ["dep:metrics"]

[dependencies]
aquatic_common = { workspace = true, features = ["rustls", "glommio"] }
aquatic_http_protocol.workspace = true
aquatic_toml_config.workspace = true

anyhow = "1"
cfg-if = "1"
either = "1"
futures = "0.3"
futures-lite = "1"
futures-rustls = "0.24"
glommio = "0.8"
itoa = "1"
libc = "0.2"
log = "0.4"
metrics = { version = "0.21", optional = true }
metrics-exporter-prometheus = { version = "0.12", optional = true, default-features = false, features = ["http-listener"] }
mimalloc = { version = "0.1", default-features = false }
memchr = "2"
privdrop = "0.5"
once_cell = "1"
rand = { version = "0.8", features = ["small_rng"] }
rustls-pemfile = "1"
serde = { version = "1", features = ["derive"] }
signal-hook = { version = "0.3" }
slab = "0.4"
socket2 = { version = "0.5", features = ["all"] }

[dev-dependencies]
quickcheck = "1"
quickcheck_macros = "1"
||||
37
crates/http/src/common.rs
Normal file
37
crates/http/src/common.rs
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use aquatic_common::access_list::AccessListArcSwap;
|
||||
use aquatic_common::CanonicalSocketAddr;
|
||||
|
||||
pub use aquatic_common::ValidUntil;
|
||||
|
||||
use aquatic_http_protocol::{
|
||||
request::{AnnounceRequest, ScrapeRequest},
|
||||
response::{AnnounceResponse, ScrapeResponse},
|
||||
};
|
||||
use glommio::channels::shared_channel::SharedSender;
|
||||
|
||||
// NOTE(review): presumably identifies a swarm worker (request consumer);
// no usage is visible in this part of the crate — confirm
#[derive(Copy, Clone, Debug)]
pub struct ConsumerId(pub usize);

// Slab key of a connection within its socket worker
#[derive(Clone, Copy, Debug)]
pub struct ConnectionId(pub usize);

/// Request passed from a socket worker to a swarm worker, along with the
/// shared-channel sender the swarm worker should use to return the response
#[derive(Debug)]
pub enum ChannelRequest {
    Announce {
        request: AnnounceRequest,
        peer_addr: CanonicalSocketAddr,
        response_sender: SharedSender<AnnounceResponse>,
    },
    Scrape {
        request: ScrapeRequest,
        peer_addr: CanonicalSocketAddr,
        response_sender: SharedSender<ScrapeResponse>,
    },
}

/// State shared by all workers: currently just the swap-able access list
#[derive(Default, Clone)]
pub struct State {
    pub access_list: Arc<AccessListArcSwap>,
}
|
||||
163
crates/http/src/config.rs
Normal file
163
crates/http/src/config.rs
Normal file
|
|
@ -0,0 +1,163 @@
|
|||
use std::{net::SocketAddr, path::PathBuf};
|
||||
|
||||
use aquatic_common::{
|
||||
access_list::AccessListConfig, cpu_pinning::asc::CpuPinningConfigAsc,
|
||||
privileges::PrivilegeConfig,
|
||||
};
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
use serde::Deserialize;
|
||||
|
||||
use aquatic_common::cli::LogLevel;
|
||||
|
||||
/// aquatic_http configuration
///
/// Does not support running behind a reverse proxy.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    /// Socket workers receive requests from the socket, parse them and send
    /// them on to the swarm workers. They then receive responses from the
    /// swarm workers, encode them and send them back over the socket.
    pub socket_workers: usize,
    /// Swarm workers receive a number of requests from socket workers,
    /// generate responses and send them back to the socket workers.
    pub swarm_workers: usize,
    pub log_level: LogLevel,
    pub network: NetworkConfig,
    pub protocol: ProtocolConfig,
    pub cleaning: CleaningConfig,
    pub privileges: PrivilegeConfig,
    pub access_list: AccessListConfig,
    pub cpu_pinning: CpuPinningConfigAsc,
    // Field only exists when built with the "metrics" feature (implied by
    // the default "prometheus" feature)
    #[cfg(feature = "metrics")]
    pub metrics: MetricsConfig,
}

// Defaults: one socket worker, one swarm worker, all sub-configs at their
// own defaults
impl Default for Config {
    fn default() -> Self {
        Self {
            socket_workers: 1,
            swarm_workers: 1,
            log_level: LogLevel::default(),
            network: NetworkConfig::default(),
            protocol: ProtocolConfig::default(),
            cleaning: CleaningConfig::default(),
            privileges: PrivilegeConfig::default(),
            access_list: AccessListConfig::default(),
            cpu_pinning: Default::default(),
            #[cfg(feature = "metrics")]
            metrics: Default::default(),
        }
    }
}

impl aquatic_common::cli::Config for Config {
    // Expose the configured log level to the shared CLI runner
    fn get_log_level(&self) -> Option<LogLevel> {
        Some(self.log_level)
    }
}
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct NetworkConfig {
    /// Bind to this address
    pub address: SocketAddr,
    /// Only allow access over IPv6
    pub only_ipv6: bool,
    /// Maximum number of pending TCP connections
    pub tcp_backlog: i32,
    /// Path to TLS certificate (DER-encoded X.509)
    pub tls_certificate_path: PathBuf,
    /// Path to TLS private key (DER-encoded ASN.1 in PKCS#8 or PKCS#1 format)
    pub tls_private_key_path: PathBuf,
    /// Keep connections alive after sending a response
    pub keep_alive: bool,
}

// Defaults: listen on 0.0.0.0:3000. TLS paths default to empty and must be
// set by the user — run() unconditionally builds a rustls config from them.
impl Default for NetworkConfig {
    fn default() -> Self {
        Self {
            address: SocketAddr::from(([0, 0, 0, 0], 3000)),
            tls_certificate_path: "".into(),
            tls_private_key_path: "".into(),
            only_ipv6: false,
            tcp_backlog: 1024,
            keep_alive: true,
        }
    }
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct ProtocolConfig {
    /// Maximum number of torrents to accept in scrape request
    pub max_scrape_torrents: usize,
    /// Maximum number of requested peers to accept in announce request
    pub max_peers: usize,
    /// Ask peers to announce this often (seconds)
    pub peer_announce_interval: usize,
}

// Defaults mirror common BitTorrent tracker limits
impl Default for ProtocolConfig {
    fn default() -> Self {
        Self {
            max_scrape_torrents: 100,
            max_peers: 50,
            peer_announce_interval: 120,
        }
    }
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct CleaningConfig {
    /// Clean peers this often (seconds)
    pub torrent_cleaning_interval: u64,
    /// Clean connections this often (seconds)
    pub connection_cleaning_interval: u64,
    /// Remove peers that have not announced for this long (seconds)
    pub max_peer_age: u32,
    /// Remove connections that haven't seen valid requests for this long (seconds)
    pub max_connection_idle: u32,
}

// Note: max_peer_age (1800s) exceeds the default peer_announce_interval
// (120s), giving peers ample slack before they are considered stale
impl Default for CleaningConfig {
    fn default() -> Self {
        Self {
            torrent_cleaning_interval: 30,
            connection_cleaning_interval: 60,
            max_peer_age: 1800,
            max_connection_idle: 180,
        }
    }
}
|
||||
|
||||
#[cfg(feature = "metrics")]
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct MetricsConfig {
    /// Run a prometheus endpoint
    pub run_prometheus_endpoint: bool,
    /// Address to run prometheus endpoint on
    pub prometheus_endpoint_address: SocketAddr,
    /// Update metrics for torrent count this often (seconds)
    pub torrent_count_update_interval: u64,
}

// Endpoint is off by default even when the feature is compiled in
#[cfg(feature = "metrics")]
impl Default for MetricsConfig {
    fn default() -> Self {
        Self {
            run_prometheus_endpoint: false,
            prometheus_endpoint_address: SocketAddr::from(([0, 0, 0, 0], 9000)),
            torrent_count_update_interval: 10,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::Config;

    // Round-trip test generated by aquatic_toml_config: the default Config
    // serialized to TOML must deserialize back to an equal value
    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}
|
||||
160
crates/http/src/lib.rs
Normal file
160
crates/http/src/lib.rs
Normal file
|
|
@ -0,0 +1,160 @@
|
|||
use anyhow::Context;
|
||||
use aquatic_common::{
|
||||
access_list::update_access_list,
|
||||
cpu_pinning::{
|
||||
glommio::{get_worker_placement, set_affinity_for_util_worker},
|
||||
WorkerIndex,
|
||||
},
|
||||
privileges::PrivilegeDropper,
|
||||
rustls_config::create_rustls_config,
|
||||
PanicSentinelWatcher, ServerStartInstant,
|
||||
};
|
||||
use common::State;
|
||||
use glommio::{channels::channel_mesh::MeshBuilder, prelude::*};
|
||||
use signal_hook::{
|
||||
consts::{SIGTERM, SIGUSR1},
|
||||
iterator::Signals,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::config::Config;
|
||||
|
||||
mod common;
|
||||
pub mod config;
|
||||
mod workers;
|
||||
|
||||
pub const APP_NAME: &str = "aquatic_http: BitTorrent tracker (HTTP over TLS)";
pub const APP_VERSION: &str = env!("CARGO_PKG_VERSION");

// Capacity of each socket-to-swarm channel in the mesh
const SHARED_CHANNEL_SIZE: usize = 1024;

/// Start the tracker: spawn one glommio executor per socket and swarm
/// worker, then block handling signals on the main thread.
///
/// SIGUSR1 reloads the access list; SIGTERM shuts down (returning an error
/// if the reason was a worker panic, propagated via the panic sentinel).
pub fn run(config: Config) -> ::anyhow::Result<()> {
    let mut signals = Signals::new([SIGUSR1, SIGTERM])?;

    #[cfg(feature = "prometheus")]
    if config.metrics.run_prometheus_endpoint {
        use metrics_exporter_prometheus::PrometheusBuilder;

        PrometheusBuilder::new()
            .with_http_listener(config.metrics.prometheus_endpoint_address)
            .install()
            .with_context(|| {
                format!(
                    "Install prometheus endpoint on {}",
                    config.metrics.prometheus_endpoint_address
                )
            })?;
    }

    let state = State::default();

    update_access_list(&config.access_list, &state.access_list)?;

    // Mesh spans all workers; producers (socket) and consumers (swarm) join
    // with their respective roles
    let num_peers = config.socket_workers + config.swarm_workers;

    let request_mesh_builder = MeshBuilder::partial(num_peers, SHARED_CHANNEL_SIZE);

    let (sentinel_watcher, sentinel) = PanicSentinelWatcher::create_with_sentinel();
    // Sized to socket workers: each one checks in after binding its socket
    let priv_dropper = PrivilegeDropper::new(config.privileges.clone(), config.socket_workers);

    let tls_config = Arc::new(create_rustls_config(
        &config.network.tls_certificate_path,
        &config.network.tls_private_key_path,
    )?);

    let server_start_instant = ServerStartInstant::new();

    let mut executors = Vec::new();

    // Spawn socket workers, each pinned according to cpu_pinning config
    for i in 0..(config.socket_workers) {
        let sentinel = sentinel.clone();
        let config = config.clone();
        let state = state.clone();
        let tls_config = tls_config.clone();
        let request_mesh_builder = request_mesh_builder.clone();
        let priv_dropper = priv_dropper.clone();

        let placement = get_worker_placement(
            &config.cpu_pinning,
            config.socket_workers,
            config.swarm_workers,
            WorkerIndex::SocketWorker(i),
        )?;
        let builder = LocalExecutorBuilder::new(placement).name(&format!("socket-{:02}", i + 1));

        let executor = builder
            .spawn(move || async move {
                workers::socket::run_socket_worker(
                    sentinel,
                    config,
                    state,
                    tls_config,
                    request_mesh_builder,
                    priv_dropper,
                    server_start_instant,
                    i,
                )
                .await
            })
            .map_err(|err| anyhow::anyhow!("Spawning executor failed: {:#}", err))?;

        executors.push(executor);
    }

    // Spawn swarm workers
    for i in 0..(config.swarm_workers) {
        let sentinel = sentinel.clone();
        let config = config.clone();
        let state = state.clone();
        let request_mesh_builder = request_mesh_builder.clone();

        let placement = get_worker_placement(
            &config.cpu_pinning,
            config.socket_workers,
            config.swarm_workers,
            WorkerIndex::SwarmWorker(i),
        )?;
        let builder = LocalExecutorBuilder::new(placement).name(&format!("swarm-{:02}", i + 1));

        let executor = builder
            .spawn(move || async move {
                workers::swarm::run_swarm_worker(
                    sentinel,
                    config,
                    state,
                    request_mesh_builder,
                    server_start_instant,
                    i,
                )
                .await
            })
            .map_err(|err| anyhow::anyhow!("Spawning executor failed: {:#}", err))?;

        executors.push(executor);
    }

    // Pin the main (utility) thread too, if pinning is enabled
    if config.cpu_pinning.active {
        set_affinity_for_util_worker(
            &config.cpu_pinning,
            config.socket_workers,
            config.swarm_workers,
        )?;
    }

    // Block on the signal iterator; this is the process's main loop
    for signal in &mut signals {
        match signal {
            SIGUSR1 => {
                // Best-effort access list reload; errors are ignored so a bad
                // file doesn't take down a running tracker
                let _ = update_access_list(&config.access_list, &state.access_list);
            }
            SIGTERM => {
                if sentinel_watcher.panic_was_triggered() {
                    return Err(anyhow::anyhow!("worker thread panicked"));
                } else {
                    return Ok(());
                }
            }
            _ => unreachable!(),
        }
    }

    Ok(())
}
|
||||
14
crates/http/src/main.rs
Normal file
14
crates/http/src/main.rs
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
use aquatic_common::cli::run_app_with_cli_and_config;
|
||||
use aquatic_http::config::Config;
|
||||
|
||||
// Use mimalloc as the process-wide allocator
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

// Thin binary entry point: delegate CLI/config handling and startup to the
// shared runner, with aquatic_http::run as the application body
fn main() {
    run_app_with_cli_and_config::<Config>(
        aquatic_http::APP_NAME,
        aquatic_http::APP_VERSION,
        aquatic_http::run,
        None,
    )
}
|
||||
2
crates/http/src/workers/mod.rs
Normal file
2
crates/http/src/workers/mod.rs
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
pub mod socket;
|
||||
pub mod swarm;
|
||||
575
crates/http/src/workers/socket.rs
Normal file
575
crates/http/src/workers/socket.rs
Normal file
|
|
@ -0,0 +1,575 @@
|
|||
use std::cell::RefCell;
|
||||
use std::collections::BTreeMap;
|
||||
use std::os::unix::prelude::{FromRawFd, IntoRawFd};
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
|
||||
use aquatic_common::privileges::PrivilegeDropper;
|
||||
use aquatic_common::rustls_config::RustlsConfig;
|
||||
use aquatic_common::{CanonicalSocketAddr, PanicSentinel, ServerStartInstant};
|
||||
use aquatic_http_protocol::common::InfoHash;
|
||||
use aquatic_http_protocol::request::{Request, RequestParseError, ScrapeRequest};
|
||||
use aquatic_http_protocol::response::{
|
||||
FailureResponse, Response, ScrapeResponse, ScrapeStatistics,
|
||||
};
|
||||
use either::Either;
|
||||
use futures::stream::FuturesUnordered;
|
||||
use futures_lite::{AsyncReadExt, AsyncWriteExt, StreamExt};
|
||||
use futures_rustls::server::TlsStream;
|
||||
use futures_rustls::TlsAcceptor;
|
||||
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role, Senders};
|
||||
use glommio::channels::shared_channel::{self, SharedReceiver};
|
||||
use glommio::net::{TcpListener, TcpStream};
|
||||
use glommio::task::JoinHandle;
|
||||
use glommio::timer::TimerActionRepeat;
|
||||
use glommio::{enclose, prelude::*};
|
||||
use once_cell::sync::Lazy;
|
||||
use slab::Slab;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
// Sizes of the fixed per-connection request and response buffers
const REQUEST_BUFFER_SIZE: usize = 2048;
const RESPONSE_BUFFER_SIZE: usize = 4096;

// Static HTTP response preamble, concatenated into RESPONSE_HEADER below;
// the Content-Length digits are written over the RESPONSE_HEADER_B region
const RESPONSE_HEADER_A: &[u8] = b"HTTP/1.1 200 OK\r\nContent-Length: ";
// NOTE(review): placeholder bytes for the Content-Length value; the exact
// width may have been collapsed during extraction — confirm against upstream
const RESPONSE_HEADER_B: &[u8] = b" ";
const RESPONSE_HEADER_C: &[u8] = b"\r\n\r\n";

// Index of this socket worker, recorded per-thread for metrics labels
#[cfg(feature = "metrics")]
thread_local! { static WORKER_INDEX: ::std::cell::Cell<usize> = Default::default() }

// Concatenated header template copied into every response buffer
static RESPONSE_HEADER: Lazy<Vec<u8>> =
    Lazy::new(|| [RESPONSE_HEADER_A, RESPONSE_HEADER_B, RESPONSE_HEADER_C].concat());
|
||||
|
||||
// NOTE(review): appears to accumulate partial scrape responses while
// `pending_worker_responses` swarm workers have yet to reply — the handling
// code is outside this view; confirm there
struct PendingScrapeResponse {
    pending_worker_responses: usize,
    stats: BTreeMap<InfoHash, ScrapeStatistics>,
}
|
||||
|
||||
// Entry in the socket worker's connection slab
struct ConnectionReference {
    // Handle used by clean_connections to cancel an expired connection task;
    // None only briefly, between slab insertion and task spawn
    task_handle: Option<JoinHandle<()>>,
    // Deadline after which the connection is considered idle and removed
    valid_until: ValidUntil,
}
|
||||
|
||||
/// Accept-loop of a socket worker.
///
/// Binds the TCP listener (coordinating a privilege drop via `priv_dropper`),
/// joins the request mesh as a producer, starts a periodic connection
/// cleaner, then spawns one local task per accepted TCP connection. Each
/// task is tracked in a slab so the cleaner can cancel idle connections.
pub async fn run_socket_worker(
    _sentinel: PanicSentinel,
    config: Config,
    state: State,
    tls_config: Arc<RustlsConfig>,
    request_mesh_builder: MeshBuilder<ChannelRequest, Partial>,
    priv_dropper: PrivilegeDropper,
    server_start_instant: ServerStartInstant,
    worker_index: usize,
) {
    #[cfg(feature = "metrics")]
    WORKER_INDEX.with(|index| index.set(worker_index));

    let config = Rc::new(config);
    let access_list = state.access_list;

    let listener = create_tcp_listener(&config, priv_dropper).expect("create tcp listener");

    let (request_senders, _) = request_mesh_builder.join(Role::Producer).await.unwrap();
    let request_senders = Rc::new(request_senders);

    let connection_slab = Rc::new(RefCell::new(Slab::new()));

    // Periodic sweep of idle connections; the closure's Option<Duration>
    // return re-arms the timer
    TimerActionRepeat::repeat(enclose!((config, connection_slab) move || {
        clean_connections(
            config.clone(),
            connection_slab.clone(),
            server_start_instant,
        )
    }));

    let mut incoming = listener.incoming();

    while let Some(stream) = incoming.next().await {
        match stream {
            Ok(stream) => {
                // Reserve a slab slot first so the connection task can refer
                // to itself (and remove itself) by key
                let key = connection_slab.borrow_mut().insert(ConnectionReference {
                    task_handle: None,
                    valid_until: ValidUntil::new(
                        server_start_instant,
                        config.cleaning.max_connection_idle,
                    ),
                });

                let task_handle = spawn_local(enclose!((config, access_list, request_senders, tls_config, connection_slab) async move {
                    let result = match stream.peer_addr() {
                        Ok(peer_addr) => {
                            let peer_addr = CanonicalSocketAddr::new(peer_addr);

                            #[cfg(feature = "metrics")]
                            let ip_version_str = peer_addr_to_ip_version_str(&peer_addr);

                            #[cfg(feature = "metrics")]
                            ::metrics::increment_gauge!(
                                "aquatic_active_connections",
                                1.0,
                                "ip_version" => ip_version_str,
                                "worker_index" => worker_index.to_string(),
                            );

                            let result = Connection::run(
                                config,
                                access_list,
                                request_senders,
                                server_start_instant,
                                ConnectionId(key),
                                tls_config,
                                connection_slab.clone(),
                                stream,
                                peer_addr
                            ).await;

                            #[cfg(feature = "metrics")]
                            ::metrics::decrement_gauge!(
                                "aquatic_active_connections",
                                1.0,
                                "ip_version" => ip_version_str,
                                "worker_index" => worker_index.to_string(),
                            );

                            result
                        }
                        Err(err) => {
                            Err(anyhow::anyhow!("Couldn't get peer addr: {:?}", err))
                        }
                    };

                    if let Err(err) = result {
                        ::log::debug!("Connection::run() error: {:?}", err);
                    }

                    // try_remove: the cleaner may already have removed us
                    connection_slab.borrow_mut().try_remove(key);
                }))
                .detach();

                // Entry may already be gone if the task finished immediately
                if let Some(reference) = connection_slab.borrow_mut().get_mut(key) {
                    reference.task_handle = Some(task_handle);
                }
            }
            Err(err) => {
                ::log::error!("accept connection: {:?}", err);
            }
        }
    }
}
|
||||
|
||||
// Periodic sweep: cancel and remove connections whose deadline has passed.
// Returning Some(interval) re-schedules this function via TimerActionRepeat.
async fn clean_connections(
    config: Rc<Config>,
    connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
    server_start_instant: ServerStartInstant,
) -> Option<Duration> {
    let now = server_start_instant.seconds_elapsed();

    connection_slab.borrow_mut().retain(|_, reference| {
        if reference.valid_until.valid(now) {
            true
        } else {
            // Cancel the connection task before dropping its slab entry
            if let Some(ref handle) = reference.task_handle {
                handle.cancel();
            }

            false
        }
    });

    // Release capacity freed by removed connections
    connection_slab.borrow_mut().shrink_to_fit();

    Some(Duration::from_secs(
        config.cleaning.connection_cleaning_interval,
    ))
}
|
||||
|
||||
/// State for a single accepted TLS client connection.
///
/// Created by `Connection::run` after the TLS handshake; lives for the
/// duration of the request/response loop.
struct Connection {
    config: Rc<Config>,
    access_list_cache: AccessListCache,
    // Channels to the swarm workers handling announce/scrape state
    request_senders: Rc<Senders<ChannelRequest>>,
    // Shared slab of all connections; used to refresh this
    // connection's idle deadline on each request
    connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
    server_start_instant: ServerStartInstant,
    stream: TlsStream<TcpStream>,
    peer_addr: CanonicalSocketAddr,
    // Key of this connection's entry in `connection_slab`
    connection_id: ConnectionId,
    // Fixed-size buffer that incoming request bytes accumulate in
    request_buffer: [u8; REQUEST_BUFFER_SIZE],
    // Number of valid bytes currently in `request_buffer`
    request_buffer_position: usize,
    // Pre-filled with RESPONSE_HEADER; body is written after it
    response_buffer: [u8; RESPONSE_BUFFER_SIZE],
}
|
||||
|
||||
impl Connection {
    /// Perform the TLS handshake, then run the request/response loop
    /// until the peer disconnects, errors, or keep-alive ends.
    async fn run(
        config: Rc<Config>,
        access_list: Arc<AccessListArcSwap>,
        request_senders: Rc<Senders<ChannelRequest>>,
        server_start_instant: ServerStartInstant,
        connection_id: ConnectionId,
        tls_config: Arc<RustlsConfig>,
        connection_slab: Rc<RefCell<Slab<ConnectionReference>>>,
        stream: TcpStream,
        peer_addr: CanonicalSocketAddr,
    ) -> anyhow::Result<()> {
        let tls_acceptor: TlsAcceptor = tls_config.into();
        let stream = tls_acceptor.accept(stream).await?;

        let mut response_buffer = [0; RESPONSE_BUFFER_SIZE];

        // The static part of the HTTP response headers is written once;
        // write_response only patches the content-length and body.
        response_buffer[..RESPONSE_HEADER.len()].copy_from_slice(&RESPONSE_HEADER);

        let mut conn = Connection {
            config: config.clone(),
            access_list_cache: create_access_list_cache(&access_list),
            request_senders: request_senders.clone(),
            connection_slab,
            server_start_instant,
            stream,
            peer_addr,
            connection_id,
            request_buffer: [0; REQUEST_BUFFER_SIZE],
            request_buffer_position: 0,
            response_buffer,
        };

        conn.run_request_response_loop().await?;

        Ok(())
    }

    /// Read requests and write responses until a failure response is
    /// sent or keep-alive is disabled, then shut the socket down.
    async fn run_request_response_loop(&mut self) -> anyhow::Result<()> {
        loop {
            let response = match self.read_request().await? {
                Either::Left(response) => Response::Failure(response),
                Either::Right(request) => self.handle_request(request).await?,
            };

            self.write_response(&response).await?;

            if matches!(response, Response::Failure(_)) || !self.config.network.keep_alive {
                // Best-effort shutdown of the underlying TCP stream;
                // errors here are not actionable.
                let _ = self
                    .stream
                    .get_ref()
                    .0
                    .shutdown(std::net::Shutdown::Both)
                    .await;

                break;
            }
        }

        Ok(())
    }

    /// Read bytes into the request buffer until a complete request
    /// parses. Returns Left(failure) for a syntactically invalid
    /// request, Right(request) on success. Errors on EOF or when the
    /// buffer fills up without a parseable request.
    async fn read_request(&mut self) -> anyhow::Result<Either<FailureResponse, Request>> {
        // Each request starts fresh: data is not pipelined across calls
        self.request_buffer_position = 0;

        loop {
            if self.request_buffer_position == self.request_buffer.len() {
                return Err(anyhow::anyhow!("request buffer is full"));
            }

            let bytes_read = self
                .stream
                .read(&mut self.request_buffer[self.request_buffer_position..])
                .await?;

            if bytes_read == 0 {
                return Err(anyhow::anyhow!("peer closed connection"));
            }

            self.request_buffer_position += bytes_read;

            // Try to parse everything received so far
            match Request::from_bytes(&self.request_buffer[..self.request_buffer_position]) {
                Ok(request) => {
                    return Ok(Either::Right(request));
                }
                Err(RequestParseError::Invalid(err)) => {
                    let response = FailureResponse {
                        failure_reason: "Invalid request".into(),
                    };

                    ::log::debug!("Invalid request: {:#}", err);

                    return Ok(Either::Left(response));
                }
                Err(RequestParseError::NeedMoreData) => {
                    // Partial request: keep reading
                    ::log::debug!(
                        "need more request data. current data: {}",
                        &self.request_buffer[..self.request_buffer_position].escape_ascii()
                    );
                }
            }
        }
    }

    /// Take a request and:
    /// - Update connection ValidUntil
    /// - Return error response if request is not allowed
    /// - If it is an announce request, send it to swarm workers and
    ///   await a response
    /// - If it is a scrape request, split it up, pass on the parts to
    ///   relevant swarm workers and await a response
    async fn handle_request(&mut self, request: Request) -> anyhow::Result<Response> {
        // Refresh this connection's idle deadline. try_borrow_mut:
        // skip the refresh rather than panic if the slab is busy.
        if let Ok(mut slab) = self.connection_slab.try_borrow_mut() {
            if let Some(reference) = slab.get_mut(self.connection_id.0) {
                reference.valid_until = ValidUntil::new(
                    self.server_start_instant,
                    self.config.cleaning.max_connection_idle,
                );
            }
        }

        match request {
            Request::Announce(request) => {
                #[cfg(feature = "metrics")]
                ::metrics::increment_counter!(
                    "aquatic_requests_total",
                    "type" => "announce",
                    "ip_version" => peer_addr_to_ip_version_str(&self.peer_addr),
                    "worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
                );

                let info_hash = request.info_hash;

                if self
                    .access_list_cache
                    .load()
                    .allows(self.config.access_list.mode, &info_hash.0)
                {
                    // One-shot channel for the swarm worker's reply
                    let (response_sender, response_receiver) = shared_channel::new_bounded(1);

                    let request = ChannelRequest::Announce {
                        request,
                        peer_addr: self.peer_addr,
                        response_sender,
                    };

                    let consumer_index = calculate_request_consumer_index(&self.config, info_hash);

                    // Only fails when receiver is closed
                    self.request_senders
                        .send_to(consumer_index, request)
                        .await
                        .unwrap();

                    response_receiver
                        .connect()
                        .await
                        .recv()
                        .await
                        .ok_or_else(|| anyhow::anyhow!("response sender closed"))
                        .map(Response::Announce)
                } else {
                    let response = Response::Failure(FailureResponse {
                        failure_reason: "Info hash not allowed".into(),
                    });

                    Ok(response)
                }
            }
            Request::Scrape(ScrapeRequest { info_hashes }) => {
                #[cfg(feature = "metrics")]
                ::metrics::increment_counter!(
                    "aquatic_requests_total",
                    "type" => "scrape",
                    "ip_version" => peer_addr_to_ip_version_str(&self.peer_addr),
                    "worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
                );

                // Group requested info hashes by the swarm worker that
                // owns each of them
                let mut info_hashes_by_worker: BTreeMap<usize, Vec<InfoHash>> = BTreeMap::new();

                for info_hash in info_hashes.into_iter() {
                    let info_hashes = info_hashes_by_worker
                        .entry(calculate_request_consumer_index(&self.config, info_hash))
                        .or_default();

                    info_hashes.push(info_hash);
                }

                let pending_worker_responses = info_hashes_by_worker.len();
                let mut response_receivers = Vec::with_capacity(pending_worker_responses);

                // Send one partial scrape request per involved worker
                for (consumer_index, info_hashes) in info_hashes_by_worker {
                    let (response_sender, response_receiver) = shared_channel::new_bounded(1);

                    response_receivers.push(response_receiver);

                    let request = ChannelRequest::Scrape {
                        request: ScrapeRequest { info_hashes },
                        peer_addr: self.peer_addr,
                        response_sender,
                    };

                    // Only fails when receiver is closed
                    self.request_senders
                        .send_to(consumer_index, request)
                        .await
                        .unwrap();
                }

                let pending_scrape_response = PendingScrapeResponse {
                    pending_worker_responses,
                    stats: Default::default(),
                };

                self.wait_for_scrape_responses(response_receivers, pending_scrape_response)
                    .await
            }
        }
    }

    /// Wait for partial scrape responses to arrive,
    /// return full response
    async fn wait_for_scrape_responses(
        &self,
        response_receivers: Vec<SharedReceiver<ScrapeResponse>>,
        mut pending: PendingScrapeResponse,
    ) -> anyhow::Result<Response> {
        // Await partial responses in whatever order they complete
        let mut responses = response_receivers
            .into_iter()
            .map(|receiver| async { receiver.connect().await.recv().await })
            .collect::<FuturesUnordered<_>>();

        loop {
            let response = responses
                .next()
                .await
                .ok_or_else(|| {
                    anyhow::anyhow!("stream ended before all partial scrape responses received")
                })?
                .ok_or_else(|| {
                    anyhow::anyhow!(
                        "wait_for_scrape_response: can't receive response, sender is closed"
                    )
                })?;

            // Merge this worker's per-torrent statistics into the
            // combined response
            pending.stats.extend(response.files);
            pending.pending_worker_responses -= 1;

            if pending.pending_worker_responses == 0 {
                let response = Response::Scrape(ScrapeResponse {
                    files: pending.stats,
                });

                break Ok(response);
            }
        }
    }

    /// Serialize `response` into the response buffer (after the
    /// pre-written header), patch the content-length header, and write
    /// the whole thing to the TLS stream.
    async fn write_response(&mut self, response: &Response) -> anyhow::Result<()> {
        // Write body and final newline to response buffer

        let mut position = RESPONSE_HEADER.len();

        let body_len = response.write(&mut &mut self.response_buffer[position..])?;

        position += body_len;

        if position + 2 > self.response_buffer.len() {
            ::log::error!("Response buffer is too short for response");

            return Err(anyhow::anyhow!("Response buffer is too short for response"));
        }

        (&mut self.response_buffer[position..position + 2]).copy_from_slice(b"\r\n");

        position += 2;

        // Trailing \r\n counts toward the body
        let content_len = body_len + 2;

        // Clear content-len header value

        {
            let start = RESPONSE_HEADER_A.len();
            let end = start + RESPONSE_HEADER_B.len();

            // Reset placeholder left over from the previous response
            (&mut self.response_buffer[start..end]).copy_from_slice(RESPONSE_HEADER_B);
        }

        // Set content-len header value

        {
            // itoa: fast integer formatting without allocation
            let mut buf = ::itoa::Buffer::new();
            let content_len_bytes = buf.format(content_len).as_bytes();

            let start = RESPONSE_HEADER_A.len();
            let end = start + content_len_bytes.len();

            (&mut self.response_buffer[start..end]).copy_from_slice(content_len_bytes);
        }

        // Write buffer to stream

        self.stream.write(&self.response_buffer[..position]).await?;
        self.stream.flush().await?;

        #[cfg(feature = "metrics")]
        {
            let response_type = match response {
                Response::Announce(_) => "announce",
                Response::Scrape(_) => "scrape",
                Response::Failure(_) => "error",
            };

            ::metrics::increment_counter!(
                "aquatic_responses_total",
                "type" => response_type,
                "ip_version" => peer_addr_to_ip_version_str(&self.peer_addr),
                "worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
            );
        }

        Ok(())
    }
}
|
||||
|
||||
/// Pick the swarm worker responsible for an info hash.
///
/// Uses the first byte of the hash modulo the worker count, so all
/// requests for the same torrent land on the same worker.
fn calculate_request_consumer_index(config: &Config, info_hash: InfoHash) -> usize {
    usize::from(info_hash.0[0]) % config.swarm_workers
}
|
||||
|
||||
/// Create a listening TCP socket bound to the configured address.
///
/// Built via `socket2` so `SO_REUSEPORT` can be set, letting multiple
/// socket workers bind the same address while the kernel balances
/// incoming connections between them. Privileges are dropped (if
/// configured) once the socket has been created and bound.
fn create_tcp_listener(
    config: &Config,
    priv_dropper: PrivilegeDropper,
) -> anyhow::Result<TcpListener> {
    let domain = if config.network.address.is_ipv4() {
        socket2::Domain::IPV4
    } else {
        socket2::Domain::IPV6
    };

    let socket = socket2::Socket::new(domain, socket2::Type::STREAM, Some(socket2::Protocol::TCP))?;

    if config.network.only_ipv6 {
        socket
            .set_only_v6(true)
            .with_context(|| "socket: set only ipv6")?;
    }

    socket
        .set_reuse_port(true)
        .with_context(|| "socket: set reuse port")?;

    socket
        .bind(&config.network.address.into())
        .with_context(|| format!("socket: bind to {}", config.network.address))?;

    socket
        .listen(config.network.tcp_backlog)
        .with_context(|| format!("socket: listen on {}", config.network.address))?;

    // Root privileges (if any) are only needed up to bind()
    priv_dropper.after_socket_creation()?;

    // SAFETY: into_raw_fd transfers ownership of the fd out of the
    // socket2 Socket, so from_raw_fd takes sole ownership and the fd is
    // not double-closed.
    Ok(unsafe { TcpListener::from_raw_fd(socket.into_raw_fd()) })
}
|
||||
|
||||
/// Metrics label value ("4" or "6") for the peer's IP version.
#[cfg(feature = "metrics")]
fn peer_addr_to_ip_version_str(addr: &CanonicalSocketAddr) -> &'static str {
    if addr.is_ipv4() {
        "4"
    } else {
        "6"
    }
}
|
||||
517
crates/http/src/workers/swarm.rs
Normal file
517
crates/http/src/workers/swarm.rs
Normal file
|
|
@ -0,0 +1,517 @@
|
|||
use std::cell::RefCell;
|
||||
use std::collections::BTreeMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use futures_lite::{Stream, StreamExt};
|
||||
use glommio::channels::channel_mesh::{MeshBuilder, Partial, Role};
|
||||
use glommio::timer::TimerActionRepeat;
|
||||
use glommio::{enclose, prelude::*};
|
||||
use rand::prelude::SmallRng;
|
||||
use rand::Rng;
|
||||
use rand::SeedableRng;
|
||||
|
||||
use aquatic_common::access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache};
|
||||
use aquatic_common::{
|
||||
extract_response_peers, CanonicalSocketAddr, IndexMap, PanicSentinel, SecondsSinceServerStart,
|
||||
ServerStartInstant, ValidUntil,
|
||||
};
|
||||
use aquatic_http_protocol::common::*;
|
||||
use aquatic_http_protocol::request::*;
|
||||
use aquatic_http_protocol::response::ResponsePeer;
|
||||
use aquatic_http_protocol::response::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
// Per-thread worker index; used only as a metrics label.
#[cfg(feature = "metrics")]
thread_local! { static WORKER_INDEX: ::std::cell::Cell<usize> = Default::default() }

/// Marker trait for the IP address types (`Ipv4Addr`/`Ipv6Addr`) that
/// swarms are keyed by.
pub trait Ip: ::std::fmt::Debug + Copy + Eq + ::std::hash::Hash {
    /// Metrics label value for this IP version ("4" or "6").
    #[cfg(feature = "metrics")]
    fn ip_version_str() -> &'static str;
}
||||
|
||||
// Label "4" for IPv4 swarms
impl Ip for Ipv4Addr {
    #[cfg(feature = "metrics")]
    fn ip_version_str() -> &'static str {
        "4"
    }
}
// Label "6" for IPv6 swarms
impl Ip for Ipv6Addr {
    #[cfg(feature = "metrics")]
    fn ip_version_str() -> &'static str {
        "6"
    }
}
|
||||
|
||||
/// Swarm status of a peer, derived from its latest announce.
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum PeerStatus {
    // Has the full torrent (zero bytes left)
    Seeding,
    // Still downloading
    Leeching,
    // Sent a "stopped" event; should be removed from the swarm
    Stopped,
}
|
||||
|
||||
impl PeerStatus {
|
||||
/// Determine peer status from announce event and number of bytes left.
|
||||
///
|
||||
/// Likely, the last branch will be taken most of the time.
|
||||
#[inline]
|
||||
pub fn from_event_and_bytes_left(event: AnnounceEvent, opt_bytes_left: Option<usize>) -> Self {
|
||||
if let AnnounceEvent::Stopped = event {
|
||||
Self::Stopped
|
||||
} else if let Some(0) = opt_bytes_left {
|
||||
Self::Seeding
|
||||
} else {
|
||||
Self::Leeching
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A peer stored in a swarm, generic over IP version.
#[derive(Debug, Clone, Copy)]
pub struct Peer<I: Ip> {
    pub ip_address: I,
    pub port: u16,
    // Evicted by periodic cleaning once this deadline passes
    pub valid_until: ValidUntil,
    // Cached flag, kept consistent with TorrentData::num_seeders
    pub seeder: bool,
}
|
||||
|
||||
impl<I: Ip> Peer<I> {
    /// Project this peer to the (ip, port) pair sent in announce responses.
    pub fn to_response_peer(&self) -> ResponsePeer<I> {
        ResponsePeer {
            ip_address: self.ip_address,
            port: self.port,
        }
    }
}
|
||||
|
||||
/// Key uniquely identifying a peer within a torrent swarm.
///
/// Includes the IP address, so a peer id announced from a different
/// address is treated as a distinct peer.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct PeerMapKey<I: Ip> {
    pub peer_id: PeerId,
    pub ip: I,
}

/// All peers of a single torrent.
pub type PeerMap<I> = IndexMap<PeerMapKey<I>, Peer<I>>;
|
||||
|
||||
/// Per-torrent swarm state.
pub struct TorrentData<I: Ip> {
    pub peers: PeerMap<I>,
    // Count of entries in `peers` with seeder == true; maintained
    // incrementally so leecher counts are O(1)
    pub num_seeders: usize,
}
|
||||
|
||||
impl<I: Ip> Default for TorrentData<I> {
    /// An empty swarm with no peers and no seeders.
    #[inline]
    fn default() -> Self {
        Self {
            peers: Default::default(),
            num_seeders: 0,
        }
    }
}
|
||||
|
||||
impl<I: Ip> TorrentData<I> {
    /// Leecher count, derived as total peers minus seeders.
    fn num_leechers(&self) -> usize {
        self.peers.len() - self.num_seeders
    }
}

/// All torrents for one IP version.
pub type TorrentMap<I> = IndexMap<InfoHash, TorrentData<I>>;
|
||||
|
||||
/// Torrent state split by IP version; IPv4 and IPv6 peers are tracked
/// in separate swarms.
#[derive(Default)]
pub struct TorrentMaps {
    pub ipv4: TorrentMap<Ipv4Addr>,
    pub ipv6: TorrentMap<Ipv6Addr>,
}
|
||||
|
||||
impl TorrentMaps {
    /// Remove expired peers and disallowed/empty torrents from both
    /// IP-version maps, updating the peer-count gauge.
    pub fn clean(
        &mut self,
        config: &Config,
        access_list: &Arc<AccessListArcSwap>,
        server_start_instant: ServerStartInstant,
    ) {
        let mut access_list_cache = create_access_list_cache(access_list);

        let now = server_start_instant.seconds_elapsed();

        Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv4, now);
        Self::clean_torrent_map(config, &mut access_list_cache, &mut self.ipv6, now);
    }

    /// Clean one torrent map: drop torrents no longer allowed by the
    /// access list, drop expired peers (keeping num_seeders in sync),
    /// and drop torrents left with no peers.
    fn clean_torrent_map<I: Ip>(
        config: &Config,
        access_list_cache: &mut AccessListCache,
        torrent_map: &mut TorrentMap<I>,
        now: SecondsSinceServerStart,
    ) {
        let mut total_num_peers = 0;

        torrent_map.retain(|info_hash, torrent_data| {
            if !access_list_cache
                .load()
                .allows(config.access_list.mode, &info_hash.0)
            {
                return false;
            }

            let num_seeders = &mut torrent_data.num_seeders;

            torrent_data.peers.retain(|_, peer| {
                let keep = peer.valid_until.valid(now);

                // Removing a seeder must decrement the cached count.
                // Bitwise & on the two bools avoids a branch; both
                // operands are trivially cheap.
                if (!keep) & peer.seeder {
                    *num_seeders -= 1;
                }

                keep
            });

            total_num_peers += torrent_data.peers.len() as u64;

            // Empty torrents are dropped entirely
            !torrent_data.peers.is_empty()
        });

        let total_num_peers = total_num_peers as f64;

        #[cfg(feature = "metrics")]
        ::metrics::gauge!(
            "aquatic_peers",
            total_num_peers,
            "ip_version" => I::ip_version_str(),
            "worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
        );

        torrent_map.shrink_to_fit();
    }
}
|
||||
|
||||
/// Entry point for a swarm worker: owns torrent state, sets up periodic
/// maintenance timers, and serves announce/scrape requests arriving
/// over the channel mesh from socket workers.
pub async fn run_swarm_worker(
    _sentinel: PanicSentinel,
    config: Config,
    state: State,
    request_mesh_builder: MeshBuilder<ChannelRequest, Partial>,
    server_start_instant: ServerStartInstant,
    worker_index: usize,
) {
    #[cfg(feature = "metrics")]
    WORKER_INDEX.with(|index| index.set(worker_index));

    // Consumer role: this worker only receives requests from the mesh
    let (_, mut request_receivers) = request_mesh_builder.join(Role::Consumer).await.unwrap();

    let torrents = Rc::new(RefCell::new(TorrentMaps::default()));
    let access_list = state.access_list;

    // Periodically clean torrents
    TimerActionRepeat::repeat(enclose!((config, torrents, access_list) move || {
        enclose!((config, torrents, access_list) move || async move {
            torrents.borrow_mut().clean(&config, &access_list, server_start_instant);

            Some(Duration::from_secs(config.cleaning.torrent_cleaning_interval))
        })()
    }));

    let max_peer_age = config.cleaning.max_peer_age;
    // Shared deadline stamped onto peers at insert/update time; updated
    // once per second instead of recomputed per request
    let peer_valid_until = Rc::new(RefCell::new(ValidUntil::new(
        server_start_instant,
        max_peer_age,
    )));

    // Periodically update peer_valid_until
    TimerActionRepeat::repeat(enclose!((peer_valid_until) move || {
        enclose!((peer_valid_until) move || async move {
            *peer_valid_until.borrow_mut() = ValidUntil::new(server_start_instant, max_peer_age);

            Some(Duration::from_secs(1))
        })()
    }));

    // Periodically update torrent count metrics
    #[cfg(feature = "metrics")]
    TimerActionRepeat::repeat(enclose!((config, torrents) move || {
        enclose!((config, torrents, worker_index) move || async move {
            let torrents = torrents.borrow_mut();

            ::metrics::gauge!(
                "aquatic_torrents",
                torrents.ipv4.len() as f64,
                "ip_version" => "4",
                "worker_index" => worker_index.to_string(),
            );
            ::metrics::gauge!(
                "aquatic_torrents",
                torrents.ipv6.len() as f64,
                "ip_version" => "6",
                "worker_index" => worker_index.to_string(),
            );

            Some(Duration::from_secs(config.metrics.torrent_count_update_interval))
        })()
    }));

    // One task per incoming channel (one per socket worker)
    let mut handles = Vec::new();

    for (_, receiver) in request_receivers.streams() {
        let handle = spawn_local(handle_request_stream(
            config.clone(),
            torrents.clone(),
            peer_valid_until.clone(),
            receiver,
        ))
        .detach();

        handles.push(handle);
    }

    for handle in handles {
        handle.await;
    }
}
|
||||
|
||||
/// Serve one stream of channel requests from a socket worker: dispatch
/// each announce/scrape to its handler and send the response back over
/// the request's embedded reply channel.
async fn handle_request_stream<S>(
    config: Config,
    torrents: Rc<RefCell<TorrentMaps>>,
    peer_valid_until: Rc<RefCell<ValidUntil>>,
    mut stream: S,
) where
    S: Stream<Item = ChannelRequest> + ::std::marker::Unpin,
{
    // RNG for randomized response-peer selection
    let mut rng = SmallRng::from_entropy();

    while let Some(channel_request) = stream.next().await {
        match channel_request {
            ChannelRequest::Announce {
                request,
                peer_addr,
                response_sender,
            } => {
                let response = handle_announce_request(
                    &config,
                    &mut rng,
                    &mut torrents.borrow_mut(),
                    peer_valid_until.borrow().to_owned(),
                    peer_addr,
                    request,
                );

                // A failed send means the requesting connection is gone;
                // log and continue serving other requests
                if let Err(err) = response_sender.connect().await.send(response).await {
                    ::log::error!("swarm worker could not send announce response: {:#}", err);
                }
            }
            ChannelRequest::Scrape {
                request,
                peer_addr,
                response_sender,
            } => {
                let response =
                    handle_scrape_request(&config, &mut torrents.borrow_mut(), peer_addr, request);

                if let Err(err) = response_sender.connect().await.send(response).await {
                    ::log::error!("swarm worker could not send scrape response: {:#}", err);
                }
            }
        };
    }
}
|
||||
|
||||
/// Handle an announce: upsert the peer into the swarm matching its IP
/// version and build a response listing peers of that same version
/// (the other version's list is left empty).
pub fn handle_announce_request(
    config: &Config,
    rng: &mut impl Rng,
    torrent_maps: &mut TorrentMaps,
    valid_until: ValidUntil,
    peer_addr: CanonicalSocketAddr,
    request: AnnounceRequest,
) -> AnnounceResponse {
    match peer_addr.get().ip() {
        IpAddr::V4(peer_ip_address) => {
            // Creates the torrent entry on first announce
            let torrent_data: &mut TorrentData<Ipv4Addr> =
                torrent_maps.ipv4.entry(request.info_hash).or_default();

            let (seeders, leechers, response_peers) = upsert_peer_and_get_response_peers(
                config,
                rng,
                peer_ip_address,
                torrent_data,
                request,
                valid_until,
            );

            let response = AnnounceResponse {
                complete: seeders,
                incomplete: leechers,
                announce_interval: config.protocol.peer_announce_interval,
                peers: ResponsePeerListV4(response_peers),
                peers6: ResponsePeerListV6(vec![]),
                warning_message: None,
            };

            response
        }
        IpAddr::V6(peer_ip_address) => {
            let torrent_data: &mut TorrentData<Ipv6Addr> =
                torrent_maps.ipv6.entry(request.info_hash).or_default();

            let (seeders, leechers, response_peers) = upsert_peer_and_get_response_peers(
                config,
                rng,
                peer_ip_address,
                torrent_data,
                request,
                valid_until,
            );

            let response = AnnounceResponse {
                complete: seeders,
                incomplete: leechers,
                announce_interval: config.protocol.peer_announce_interval,
                peers: ResponsePeerListV4(vec![]),
                peers6: ResponsePeerListV6(response_peers),
                warning_message: None,
            };

            response
        }
    }
}
|
||||
|
||||
/// Insert/update peer. Return num_seeders, num_leechers and response peers
///
/// Maintains `torrent_data.num_seeders` incrementally: the seeder count
/// is adjusted based on the new status and on whatever peer entry (if
/// any) was replaced or removed by the upsert.
pub fn upsert_peer_and_get_response_peers<I: Ip>(
    config: &Config,
    rng: &mut impl Rng,
    peer_ip_address: I,
    torrent_data: &mut TorrentData<I>,
    request: AnnounceRequest,
    valid_until: ValidUntil,
) -> (usize, usize, Vec<ResponsePeer<I>>) {
    // Insert/update/remove peer who sent this request

    let peer_status =
        PeerStatus::from_event_and_bytes_left(request.event, Some(request.bytes_left));

    let peer_map_key = PeerMapKey {
        peer_id: request.peer_id,
        ip: peer_ip_address,
    };

    // IndexMap::insert returns the previous entry for the key, if any;
    // remove returns the removed entry
    let opt_removed_peer = match peer_status {
        PeerStatus::Leeching => {
            let peer = Peer {
                ip_address: peer_ip_address,
                port: request.port,
                valid_until,
                seeder: false,
            };

            torrent_data.peers.insert(peer_map_key.clone(), peer)
        }
        PeerStatus::Seeding => {
            torrent_data.num_seeders += 1;

            let peer = Peer {
                ip_address: peer_ip_address,
                port: request.port,
                valid_until,
                seeder: true,
            };

            torrent_data.peers.insert(peer_map_key.clone(), peer)
        }
        PeerStatus::Stopped => torrent_data.peers.remove(&peer_map_key),
    };

    // If the upsert displaced (or removed) a peer that was a seeder,
    // undo its contribution to the count. For a seeder re-announcing as
    // a seeder this cancels the increment above, leaving the count flat.
    if let Some(&Peer { seeder: true, .. }) = opt_removed_peer.as_ref() {
        torrent_data.num_seeders -= 1;
    }

    // Gauge changes only when the peer count actually changed:
    // a removal that found a peer, or an insert that added a new one
    #[cfg(feature = "metrics")]
    match peer_status {
        PeerStatus::Stopped if opt_removed_peer.is_some() => {
            ::metrics::decrement_gauge!(
                "aquatic_peers",
                1.0,
                "ip_version" => I::ip_version_str(),
                "worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
            );
        }
        PeerStatus::Leeching | PeerStatus::Seeding if opt_removed_peer.is_none() => {
            ::metrics::increment_gauge!(
                "aquatic_peers",
                1.0,
                "ip_version" => I::ip_version_str(),
                "worker_index" => WORKER_INDEX.with(|index| index.get()).to_string(),
            );
        }
        _ => {}
    }

    // Stopped peers get no peer list; others get up to max_peers,
    // capped by the client's numwant (0/absent means "server default")
    let response_peers = if let PeerStatus::Stopped = peer_status {
        Vec::new()
    } else {
        let max_num_peers_to_take = match request.numwant {
            Some(0) | None => config.protocol.max_peers,
            Some(numwant) => numwant.min(config.protocol.max_peers),
        };

        extract_response_peers(
            rng,
            &torrent_data.peers,
            max_num_peers_to_take,
            peer_map_key,
            Peer::to_response_peer,
        )
    };

    (
        torrent_data.num_seeders,
        torrent_data.num_leechers(),
        response_peers,
    )
}
|
||||
|
||||
/// Handle a scrape: report seeder/leecher counts for each requested
/// torrent, looked up in the map matching the requester's IP version.
/// Unknown info hashes are simply omitted from the response.
pub fn handle_scrape_request(
    config: &Config,
    torrent_maps: &mut TorrentMaps,
    peer_addr: CanonicalSocketAddr,
    request: ScrapeRequest,
) -> ScrapeResponse {
    // Cap the number of torrents scraped per request
    let num_to_take = request
        .info_hashes
        .len()
        .min(config.protocol.max_scrape_torrents);

    let mut response = ScrapeResponse {
        files: BTreeMap::new(),
    };

    let peer_ip = peer_addr.get().ip();

    // If request.info_hashes is empty, don't return scrape for all
    // torrents, even though reference server does it. It is too expensive.
    if peer_ip.is_ipv4() {
        for info_hash in request.info_hashes.into_iter().take(num_to_take) {
            if let Some(torrent_data) = torrent_maps.ipv4.get(&info_hash) {
                let stats = ScrapeStatistics {
                    complete: torrent_data.num_seeders,
                    downloaded: 0, // No implementation planned
                    incomplete: torrent_data.num_leechers(),
                };

                response.files.insert(info_hash, stats);
            }
        }
    } else {
        for info_hash in request.info_hashes.into_iter().take(num_to_take) {
            if let Some(torrent_data) = torrent_maps.ipv6.get(&info_hash) {
                let stats = ScrapeStatistics {
                    complete: torrent_data.num_seeders,
                    downloaded: 0, // No implementation planned
                    incomplete: torrent_data.num_leechers(),
                };

                response.files.insert(info_hash, stats);
            }
        }
    };

    response
}
|
||||
35
crates/http_load_test/Cargo.toml
Normal file
35
crates/http_load_test/Cargo.toml
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
[package]
|
||||
name = "aquatic_http_load_test"
|
||||
description = "BitTorrent (HTTP over TLS) load tester"
|
||||
keywords = ["http", "benchmark", "peer-to-peer", "torrent", "bittorrent"]
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "aquatic_http_load_test"
|
||||
|
||||
[dependencies]
|
||||
aquatic_common = { workspace = true, features = ["glommio"] }
|
||||
aquatic_http_protocol.workspace = true
|
||||
aquatic_toml_config.workspace = true
|
||||
|
||||
anyhow = "1"
|
||||
futures-lite = "1"
|
||||
futures-rustls = "0.24"
|
||||
hashbrown = "0.14"
|
||||
glommio = "0.8"
|
||||
log = "0.4"
|
||||
mimalloc = { version = "0.1", default-features = false }
|
||||
rand = { version = "0.8", features = ["small_rng"] }
|
||||
rand_distr = "0.4"
|
||||
rustls = { version = "0.21", default-features = false, features = ["logging", "dangerous_configuration"] } # TLS 1.2 disabled
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
|
||||
[dev-dependencies]
|
||||
quickcheck = "1"
|
||||
quickcheck_macros = "1"
|
||||
39
crates/http_load_test/src/common.rs
Normal file
39
crates/http_load_test/src/common.rs
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
use std::sync::{atomic::AtomicUsize, Arc};
|
||||
|
||||
use rand_distr::Gamma;
|
||||
|
||||
pub use aquatic_http_protocol::common::*;
|
||||
pub use aquatic_http_protocol::request::*;
|
||||
pub use aquatic_http_protocol::response::*;
|
||||
|
||||
/// A simulated peer used by the load tester.
#[derive(PartialEq, Eq, Clone)]
pub struct TorrentPeer {
    pub info_hash: InfoHash,
    // Indices into the shared info hash list used for scrape requests
    // (field name typo "indeces" kept: it is public API)
    pub scrape_hash_indeces: Vec<usize>,
    pub peer_id: PeerId,
    pub port: u16,
}
|
||||
|
||||
/// Load test counters, shared across workers and updated atomically.
#[derive(Default)]
pub struct Statistics {
    pub requests: AtomicUsize,
    pub response_peers: AtomicUsize,
    pub responses_announce: AtomicUsize,
    pub responses_scrape: AtomicUsize,
    pub responses_failure: AtomicUsize,
    pub bytes_sent: AtomicUsize,
    pub bytes_received: AtomicUsize,
}
|
||||
|
||||
/// State shared between all load test workers (cheap to clone: Arcs).
#[derive(Clone)]
pub struct LoadTestState {
    pub info_hashes: Arc<Vec<InfoHash>>,
    pub statistics: Arc<Statistics>,
    // Gamma distribution used to pick torrents with skewed popularity
    pub gamma: Arc<Gamma<f64>>,
}
|
||||
|
||||
/// Kind of tracker request to generate next.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum RequestType {
    Announce,
    Scrape,
}
|
||||
89
crates/http_load_test/src/config.rs
Normal file
89
crates/http_load_test/src/config.rs
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
use std::net::SocketAddr;
|
||||
|
||||
use aquatic_common::cli::LogLevel;
|
||||
use aquatic_common::cpu_pinning::desc::CpuPinningConfigDesc;
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
use serde::Deserialize;
|
||||
|
||||
/// aquatic_http_load_test configuration
///
/// Regular `//` comments are used for added notes below so that
/// derive-generated TOML output is unaffected.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    // Tracker address to connect to
    pub server_address: SocketAddr,
    pub log_level: LogLevel,
    // Number of load-generating worker threads
    pub num_workers: usize,
    /// Maximum number of connections to keep open
    pub num_connections: usize,
    /// How often to check if num_connections connections are open, and
    /// open a new one otherwise. A value of 0 means that connections are
    /// opened as quickly as possible, which is useful when the tracker
    /// does not keep connections alive.
    pub connection_creation_interval_ms: u64,
    /// Announce/scrape url suffix. Use `/my_token/` to get `/announce/my_token/`
    pub url_suffix: String,
    // Test duration in seconds; presumably 0 means unlimited — confirm
    // against main.rs before relying on it
    pub duration: usize,
    // Whether to send keep-alive requests on open connections
    pub keep_alive: bool,
    pub torrents: TorrentConfig,
    pub cpu_pinning: CpuPinningConfigDesc,
}
|
||||
|
||||
// Hook into the shared CLI machinery so the log level from the config
// file is applied at startup.
impl aquatic_common::cli::Config for Config {
    fn get_log_level(&self) -> Option<LogLevel> {
        Some(self.log_level)
    }
}
|
||||
|
||||
impl Default for Config {
    /// Defaults target a local tracker on 127.0.0.1:3000 with a single
    /// worker and keep-alive enabled.
    fn default() -> Self {
        Self {
            server_address: "127.0.0.1:3000".parse().unwrap(),
            log_level: LogLevel::Error,
            num_workers: 1,
            num_connections: 128,
            connection_creation_interval_ms: 10,
            url_suffix: "".into(),
            duration: 0,
            keep_alive: true,
            torrents: TorrentConfig::default(),
            cpu_pinning: Default::default(),
        }
    }
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
pub struct TorrentConfig {
|
||||
pub number_of_torrents: usize,
|
||||
/// Probability that a generated peer is a seeder
|
||||
pub peer_seeder_probability: f64,
|
||||
/// Probability that a generated request is a announce request, as part
|
||||
/// of sum of the various weight arguments.
|
||||
pub weight_announce: usize,
|
||||
/// Probability that a generated request is a scrape request, as part
|
||||
/// of sum of the various weight arguments.
|
||||
pub weight_scrape: usize,
|
||||
/// Peers choose torrents according to this Gamma distribution shape
|
||||
pub torrent_gamma_shape: f64,
|
||||
/// Peers choose torrents according to this Gamma distribution scale
|
||||
pub torrent_gamma_scale: f64,
|
||||
}
|
||||
|
||||
impl Default for TorrentConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
number_of_torrents: 10_000,
|
||||
peer_seeder_probability: 0.25,
|
||||
weight_announce: 5,
|
||||
weight_scrape: 0,
|
||||
torrent_gamma_shape: 0.2,
|
||||
torrent_gamma_scale: 100.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Config;
|
||||
|
||||
::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
|
||||
}
|
||||
204
crates/http_load_test/src/main.rs
Normal file
204
crates/http_load_test/src/main.rs
Normal file
|
|
@ -0,0 +1,204 @@
|
|||
use std::sync::{atomic::Ordering, Arc};
|
||||
use std::thread;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use ::glommio::LocalExecutorBuilder;
|
||||
use aquatic_common::cpu_pinning::glommio::{get_worker_placement, set_affinity_for_util_worker};
|
||||
use aquatic_common::cpu_pinning::WorkerIndex;
|
||||
use rand::prelude::*;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
mod common;
|
||||
mod config;
|
||||
mod network;
|
||||
mod utils;
|
||||
|
||||
use common::*;
|
||||
use config::*;
|
||||
use network::*;
|
||||
|
||||
#[global_allocator]
|
||||
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
/// Multiply bytes during a second with this to get Mbit/s
|
||||
const MBITS_FACTOR: f64 = 1.0 / ((1024.0 * 1024.0) / 8.0);
|
||||
|
||||
pub fn main() {
|
||||
aquatic_common::cli::run_app_with_cli_and_config::<Config>(
|
||||
"aquatic_http_load_test: BitTorrent load tester",
|
||||
env!("CARGO_PKG_VERSION"),
|
||||
run,
|
||||
None,
|
||||
)
|
||||
}
|
||||
|
||||
fn run(config: Config) -> ::anyhow::Result<()> {
|
||||
if config.torrents.weight_announce + config.torrents.weight_scrape == 0 {
|
||||
panic!("Error: at least one weight must be larger than zero.");
|
||||
}
|
||||
|
||||
println!("Starting client with config: {:#?}", config);
|
||||
|
||||
let mut info_hashes = Vec::with_capacity(config.torrents.number_of_torrents);
|
||||
|
||||
let mut rng = SmallRng::from_entropy();
|
||||
|
||||
for _ in 0..config.torrents.number_of_torrents {
|
||||
info_hashes.push(InfoHash(rng.gen()));
|
||||
}
|
||||
|
||||
let gamma = Gamma::new(
|
||||
config.torrents.torrent_gamma_shape,
|
||||
config.torrents.torrent_gamma_scale,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let state = LoadTestState {
|
||||
info_hashes: Arc::new(info_hashes),
|
||||
statistics: Arc::new(Statistics::default()),
|
||||
gamma: Arc::new(gamma),
|
||||
};
|
||||
|
||||
let tls_config = create_tls_config().unwrap();
|
||||
|
||||
for i in 0..config.num_workers {
|
||||
let config = config.clone();
|
||||
let tls_config = tls_config.clone();
|
||||
let state = state.clone();
|
||||
|
||||
let placement = get_worker_placement(
|
||||
&config.cpu_pinning,
|
||||
config.num_workers,
|
||||
0,
|
||||
WorkerIndex::SocketWorker(i),
|
||||
)?;
|
||||
|
||||
LocalExecutorBuilder::new(placement)
|
||||
.name("load-test")
|
||||
.spawn(move || async move {
|
||||
run_socket_thread(config, tls_config, state).await.unwrap();
|
||||
})
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
if config.cpu_pinning.active {
|
||||
set_affinity_for_util_worker(&config.cpu_pinning, config.num_workers, 0)?;
|
||||
}
|
||||
|
||||
monitor_statistics(state, &config);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn monitor_statistics(state: LoadTestState, config: &Config) {
|
||||
let start_time = Instant::now();
|
||||
let mut report_avg_response_vec: Vec<f64> = Vec::new();
|
||||
|
||||
let interval = 5;
|
||||
let interval_f64 = interval as f64;
|
||||
|
||||
loop {
|
||||
thread::sleep(Duration::from_secs(interval));
|
||||
|
||||
let statistics = state.statistics.as_ref();
|
||||
|
||||
let responses_announce = statistics
|
||||
.responses_announce
|
||||
.fetch_and(0, Ordering::Relaxed) as f64;
|
||||
// let response_peers = statistics.response_peers
|
||||
// .fetch_and(0, Ordering::SeqCst) as f64;
|
||||
|
||||
let requests_per_second =
|
||||
statistics.requests.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;
|
||||
let responses_scrape_per_second =
|
||||
statistics.responses_scrape.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;
|
||||
let responses_failure_per_second =
|
||||
statistics.responses_failure.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;
|
||||
|
||||
let bytes_sent_per_second =
|
||||
statistics.bytes_sent.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;
|
||||
let bytes_received_per_second =
|
||||
statistics.bytes_received.fetch_and(0, Ordering::Relaxed) as f64 / interval_f64;
|
||||
|
||||
let responses_announce_per_second = responses_announce / interval_f64;
|
||||
|
||||
let responses_per_second = responses_announce_per_second
|
||||
+ responses_scrape_per_second
|
||||
+ responses_failure_per_second;
|
||||
|
||||
report_avg_response_vec.push(responses_per_second);
|
||||
|
||||
println!();
|
||||
println!("Requests out: {:.2}/second", requests_per_second);
|
||||
println!("Responses in: {:.2}/second", responses_per_second);
|
||||
println!(
|
||||
" - Announce responses: {:.2}",
|
||||
responses_announce_per_second
|
||||
);
|
||||
println!(" - Scrape responses: {:.2}", responses_scrape_per_second);
|
||||
println!(
|
||||
" - Failure responses: {:.2}",
|
||||
responses_failure_per_second
|
||||
);
|
||||
//println!("Peers per announce response: {:.2}", response_peers / responses_announce);
|
||||
println!(
|
||||
"Bandwidth out: {:.2}Mbit/s",
|
||||
bytes_sent_per_second * MBITS_FACTOR
|
||||
);
|
||||
println!(
|
||||
"Bandwidth in: {:.2}Mbit/s",
|
||||
bytes_received_per_second * MBITS_FACTOR
|
||||
);
|
||||
|
||||
let time_elapsed = start_time.elapsed();
|
||||
let duration = Duration::from_secs(config.duration as u64);
|
||||
|
||||
if config.duration != 0 && time_elapsed >= duration {
|
||||
let report_len = report_avg_response_vec.len() as f64;
|
||||
let report_sum: f64 = report_avg_response_vec.into_iter().sum();
|
||||
let report_avg: f64 = report_sum / report_len;
|
||||
|
||||
println!(
|
||||
concat!(
|
||||
"\n# aquatic load test report\n\n",
|
||||
"Test ran for {} seconds.\n",
|
||||
"Average responses per second: {:.2}\n\nConfig: {:#?}\n"
|
||||
),
|
||||
time_elapsed.as_secs(),
|
||||
report_avg,
|
||||
config
|
||||
);
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct FakeCertificateVerifier;
|
||||
|
||||
impl rustls::client::ServerCertVerifier for FakeCertificateVerifier {
|
||||
fn verify_server_cert(
|
||||
&self,
|
||||
_end_entity: &rustls::Certificate,
|
||||
_intermediates: &[rustls::Certificate],
|
||||
_server_name: &rustls::ServerName,
|
||||
_scts: &mut dyn Iterator<Item = &[u8]>,
|
||||
_ocsp_response: &[u8],
|
||||
_now: std::time::SystemTime,
|
||||
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
|
||||
Ok(rustls::client::ServerCertVerified::assertion())
|
||||
}
|
||||
}
|
||||
|
||||
fn create_tls_config() -> anyhow::Result<Arc<rustls::ClientConfig>> {
|
||||
let mut config = rustls::ClientConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_root_certificates(rustls::RootCertStore::empty())
|
||||
.with_no_client_auth();
|
||||
|
||||
config
|
||||
.dangerous()
|
||||
.set_certificate_verifier(Arc::new(FakeCertificateVerifier));
|
||||
|
||||
Ok(Arc::new(config))
|
||||
}
|
||||
254
crates/http_load_test/src/network.rs
Normal file
254
crates/http_load_test/src/network.rs
Normal file
|
|
@ -0,0 +1,254 @@
|
|||
use std::{
|
||||
cell::RefCell,
|
||||
convert::TryInto,
|
||||
io::Cursor,
|
||||
rc::Rc,
|
||||
sync::{atomic::Ordering, Arc},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use aquatic_http_protocol::response::Response;
|
||||
use futures_lite::{AsyncReadExt, AsyncWriteExt};
|
||||
use futures_rustls::{client::TlsStream, TlsConnector};
|
||||
use glommio::net::TcpStream;
|
||||
use glommio::{prelude::*, timer::TimerActionRepeat};
|
||||
use rand::{prelude::SmallRng, SeedableRng};
|
||||
|
||||
use crate::{common::LoadTestState, config::Config, utils::create_random_request};
|
||||
|
||||
pub async fn run_socket_thread(
|
||||
config: Config,
|
||||
tls_config: Arc<rustls::ClientConfig>,
|
||||
load_test_state: LoadTestState,
|
||||
) -> anyhow::Result<()> {
|
||||
let config = Rc::new(config);
|
||||
let num_active_connections = Rc::new(RefCell::new(0usize));
|
||||
let rng = Rc::new(RefCell::new(SmallRng::from_entropy()));
|
||||
|
||||
let interval = config.connection_creation_interval_ms;
|
||||
|
||||
if interval == 0 {
|
||||
loop {
|
||||
if *num_active_connections.borrow() < config.num_connections {
|
||||
if let Err(err) = Connection::run(
|
||||
config.clone(),
|
||||
tls_config.clone(),
|
||||
load_test_state.clone(),
|
||||
num_active_connections.clone(),
|
||||
rng.clone(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
::log::error!("connection creation error: {:?}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let interval = Duration::from_millis(interval);
|
||||
|
||||
TimerActionRepeat::repeat(move || {
|
||||
periodically_open_connections(
|
||||
config.clone(),
|
||||
interval,
|
||||
tls_config.clone(),
|
||||
load_test_state.clone(),
|
||||
num_active_connections.clone(),
|
||||
rng.clone(),
|
||||
)
|
||||
});
|
||||
}
|
||||
|
||||
futures_lite::future::pending::<bool>().await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn periodically_open_connections(
|
||||
config: Rc<Config>,
|
||||
interval: Duration,
|
||||
tls_config: Arc<rustls::ClientConfig>,
|
||||
load_test_state: LoadTestState,
|
||||
num_active_connections: Rc<RefCell<usize>>,
|
||||
rng: Rc<RefCell<SmallRng>>,
|
||||
) -> Option<Duration> {
|
||||
if *num_active_connections.borrow() < config.num_connections {
|
||||
spawn_local(async move {
|
||||
if let Err(err) = Connection::run(
|
||||
config,
|
||||
tls_config,
|
||||
load_test_state,
|
||||
num_active_connections,
|
||||
rng.clone(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
::log::error!("connection creation error: {:?}", err);
|
||||
}
|
||||
})
|
||||
.detach();
|
||||
}
|
||||
|
||||
Some(interval)
|
||||
}
|
||||
|
||||
struct Connection {
|
||||
config: Rc<Config>,
|
||||
load_test_state: LoadTestState,
|
||||
rng: Rc<RefCell<SmallRng>>,
|
||||
stream: TlsStream<TcpStream>,
|
||||
buffer: [u8; 2048],
|
||||
}
|
||||
|
||||
impl Connection {
|
||||
async fn run(
|
||||
config: Rc<Config>,
|
||||
tls_config: Arc<rustls::ClientConfig>,
|
||||
load_test_state: LoadTestState,
|
||||
num_active_connections: Rc<RefCell<usize>>,
|
||||
rng: Rc<RefCell<SmallRng>>,
|
||||
) -> anyhow::Result<()> {
|
||||
let stream = TcpStream::connect(config.server_address)
|
||||
.await
|
||||
.map_err(|err| anyhow::anyhow!("connect: {:?}", err))?;
|
||||
|
||||
let stream = TlsConnector::from(tls_config)
|
||||
.connect("example.com".try_into().unwrap(), stream)
|
||||
.await?;
|
||||
|
||||
let mut connection = Connection {
|
||||
config,
|
||||
load_test_state,
|
||||
rng,
|
||||
stream,
|
||||
buffer: [0; 2048],
|
||||
};
|
||||
|
||||
*num_active_connections.borrow_mut() += 1;
|
||||
|
||||
if let Err(err) = connection.run_connection_loop().await {
|
||||
::log::info!("connection error: {:?}", err);
|
||||
}
|
||||
|
||||
*num_active_connections.borrow_mut() -= 1;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_connection_loop(&mut self) -> anyhow::Result<()> {
|
||||
loop {
|
||||
self.send_request().await?;
|
||||
self.read_response().await?;
|
||||
|
||||
if !self.config.keep_alive {
|
||||
break Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_request(&mut self) -> anyhow::Result<()> {
|
||||
let request = create_random_request(
|
||||
&self.config,
|
||||
&self.load_test_state,
|
||||
&mut self.rng.borrow_mut(),
|
||||
);
|
||||
|
||||
let mut cursor = Cursor::new(&mut self.buffer[..]);
|
||||
|
||||
request.write(&mut cursor, self.config.url_suffix.as_bytes())?;
|
||||
|
||||
let cursor_position = cursor.position() as usize;
|
||||
|
||||
let bytes_sent = self
|
||||
.stream
|
||||
.write(&cursor.into_inner()[..cursor_position])
|
||||
.await?;
|
||||
|
||||
self.stream.flush().await?;
|
||||
|
||||
self.load_test_state
|
||||
.statistics
|
||||
.bytes_sent
|
||||
.fetch_add(bytes_sent, Ordering::Relaxed);
|
||||
|
||||
self.load_test_state
|
||||
.statistics
|
||||
.requests
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn read_response(&mut self) -> anyhow::Result<()> {
|
||||
let mut buffer_position = 0;
|
||||
|
||||
loop {
|
||||
let bytes_read = self
|
||||
.stream
|
||||
.read(&mut self.buffer[buffer_position..])
|
||||
.await?;
|
||||
|
||||
if bytes_read == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
buffer_position += bytes_read;
|
||||
|
||||
let interesting_bytes = &self.buffer[..buffer_position];
|
||||
|
||||
let mut opt_body_start_index = None;
|
||||
|
||||
for (i, chunk) in interesting_bytes.windows(4).enumerate() {
|
||||
if chunk == b"\r\n\r\n" {
|
||||
opt_body_start_index = Some(i + 4);
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(body_start_index) = opt_body_start_index {
|
||||
match Response::from_bytes(&interesting_bytes[body_start_index..]) {
|
||||
Ok(response) => {
|
||||
match response {
|
||||
Response::Announce(_) => {
|
||||
self.load_test_state
|
||||
.statistics
|
||||
.responses_announce
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
Response::Scrape(_) => {
|
||||
self.load_test_state
|
||||
.statistics
|
||||
.responses_scrape
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
Response::Failure(response) => {
|
||||
self.load_test_state
|
||||
.statistics
|
||||
.responses_failure
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
println!("failure response: reason: {}", response.failure_reason);
|
||||
}
|
||||
}
|
||||
|
||||
self.load_test_state
|
||||
.statistics
|
||||
.bytes_received
|
||||
.fetch_add(interesting_bytes.len(), Ordering::Relaxed);
|
||||
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
::log::warn!(
|
||||
"deserialize response error with {} bytes read: {:?}, text: {}",
|
||||
buffer_position,
|
||||
err,
|
||||
interesting_bytes.escape_ascii()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
81
crates/http_load_test/src/utils.rs
Normal file
81
crates/http_load_test/src/utils.rs
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use rand::distributions::WeightedIndex;
|
||||
use rand::prelude::*;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::*;
|
||||
|
||||
pub fn create_random_request(
|
||||
config: &Config,
|
||||
state: &LoadTestState,
|
||||
rng: &mut SmallRng,
|
||||
) -> Request {
|
||||
let weights = [
|
||||
config.torrents.weight_announce as u32,
|
||||
config.torrents.weight_scrape as u32,
|
||||
];
|
||||
|
||||
let items = [RequestType::Announce, RequestType::Scrape];
|
||||
|
||||
let dist = WeightedIndex::new(&weights).expect("random request weighted index");
|
||||
|
||||
match items[dist.sample(rng)] {
|
||||
RequestType::Announce => create_announce_request(config, state, rng),
|
||||
RequestType::Scrape => create_scrape_request(config, state, rng),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn create_announce_request(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> Request {
|
||||
let (event, bytes_left) = {
|
||||
if rng.gen_bool(config.torrents.peer_seeder_probability) {
|
||||
(AnnounceEvent::Completed, 0)
|
||||
} else {
|
||||
(AnnounceEvent::Started, 50)
|
||||
}
|
||||
};
|
||||
|
||||
let info_hash_index = select_info_hash_index(config, &state, rng);
|
||||
|
||||
Request::Announce(AnnounceRequest {
|
||||
info_hash: state.info_hashes[info_hash_index],
|
||||
peer_id: PeerId(rng.gen()),
|
||||
bytes_left,
|
||||
event,
|
||||
key: None,
|
||||
numwant: None,
|
||||
port: rng.gen(),
|
||||
bytes_uploaded: 0,
|
||||
bytes_downloaded: 0,
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn create_scrape_request(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> Request {
|
||||
let mut scrape_hashes = Vec::with_capacity(5);
|
||||
|
||||
for _ in 0..5 {
|
||||
let info_hash_index = select_info_hash_index(config, &state, rng);
|
||||
|
||||
scrape_hashes.push(state.info_hashes[info_hash_index]);
|
||||
}
|
||||
|
||||
Request::Scrape(ScrapeRequest {
|
||||
info_hashes: scrape_hashes,
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn select_info_hash_index(config: &Config, state: &LoadTestState, rng: &mut impl Rng) -> usize {
|
||||
gamma_usize(rng, &state.gamma, config.torrents.number_of_torrents - 1)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn gamma_usize(rng: &mut impl Rng, gamma: &Arc<Gamma<f64>>, max: usize) -> usize {
|
||||
let p: f64 = gamma.sample(rng);
|
||||
let p = (p.min(101.0f64) - 1.0) / 100.0;
|
||||
|
||||
(p * max as f64) as usize
|
||||
}
|
||||
42
crates/http_protocol/Cargo.toml
Normal file
42
crates/http_protocol/Cargo.toml
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
[package]
|
||||
name = "aquatic_http_protocol"
|
||||
description = "HTTP BitTorrent tracker protocol"
|
||||
keywords = ["http", "protocol", "peer-to-peer", "torrent", "bittorrent"]
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "aquatic_http_protocol"
|
||||
|
||||
[[bench]]
|
||||
name = "bench_request_from_bytes"
|
||||
path = "benches/bench_request_from_bytes.rs"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "bench_announce_response_to_bytes"
|
||||
path = "benches/bench_announce_response_to_bytes.rs"
|
||||
harness = false
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1"
|
||||
compact_str = { version = "0.7", features = ["serde"] }
|
||||
hex = { version = "0.4", default-features = false }
|
||||
httparse = "1"
|
||||
itoa = "1"
|
||||
log = "0.4"
|
||||
memchr = "2"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_bencode = "0.2"
|
||||
urlencoding = "2"
|
||||
|
||||
[dev-dependencies]
|
||||
bendy = { version = "0.4.0-beta.2", features = ["std", "serde"] }
|
||||
criterion = "0.4"
|
||||
quickcheck = "1"
|
||||
quickcheck_macros = "1"
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
use std::net::Ipv4Addr;
|
||||
use std::time::Duration;
|
||||
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
|
||||
use aquatic_http_protocol::response::*;
|
||||
|
||||
pub fn bench(c: &mut Criterion) {
|
||||
let mut peers = Vec::new();
|
||||
|
||||
for i in 0..100 {
|
||||
peers.push(ResponsePeer {
|
||||
ip_address: Ipv4Addr::new(127, 0, 0, i),
|
||||
port: i as u16,
|
||||
})
|
||||
}
|
||||
|
||||
let announce_response = AnnounceResponse {
|
||||
announce_interval: 120,
|
||||
complete: 100,
|
||||
incomplete: 500,
|
||||
peers: ResponsePeerListV4(peers),
|
||||
peers6: ResponsePeerListV6(Vec::new()),
|
||||
warning_message: None,
|
||||
};
|
||||
|
||||
let response = Response::Announce(announce_response);
|
||||
|
||||
let mut buffer = [0u8; 4096];
|
||||
let mut buffer = ::std::io::Cursor::new(&mut buffer[..]);
|
||||
|
||||
c.bench_function("announce-response-to-bytes", |b| {
|
||||
b.iter(|| {
|
||||
buffer.set_position(0);
|
||||
|
||||
Response::write(black_box(&response), black_box(&mut buffer)).unwrap();
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group! {
|
||||
name = benches;
|
||||
config = Criterion::default()
|
||||
.sample_size(1000)
|
||||
.measurement_time(Duration::from_secs(180))
|
||||
.significance_level(0.01);
|
||||
targets = bench
|
||||
}
|
||||
criterion_main!(benches);
|
||||
22
crates/http_protocol/benches/bench_request_from_bytes.rs
Normal file
22
crates/http_protocol/benches/bench_request_from_bytes.rs
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use std::time::Duration;
|
||||
|
||||
use aquatic_http_protocol::request::Request;
|
||||
|
||||
static INPUT: &[u8] = b"GET /announce?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9&peer_id=-TR2940-5ert69muw5t8&port=11000&uploaded=0&downloaded=0&left=0&numwant=0&key=3ab4b977&compact=1&supportcrypto=1&event=stopped HTTP/1.1\r\n\r\n";
|
||||
|
||||
pub fn bench(c: &mut Criterion) {
|
||||
c.bench_function("request-from-bytes", |b| {
|
||||
b.iter(|| Request::from_bytes(black_box(INPUT)))
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group! {
|
||||
name = benches;
|
||||
config = Criterion::default()
|
||||
.sample_size(1000)
|
||||
.measurement_time(Duration::from_secs(180))
|
||||
.significance_level(0.01);
|
||||
targets = bench
|
||||
}
|
||||
criterion_main!(benches);
|
||||
102
crates/http_protocol/src/common.rs
Normal file
102
crates/http_protocol/src/common.rs
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
use std::str::FromStr;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::utils::*;
|
||||
|
||||
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(transparent)]
|
||||
pub struct PeerId(
|
||||
#[serde(
|
||||
serialize_with = "serialize_20_bytes",
|
||||
deserialize_with = "deserialize_20_bytes"
|
||||
)]
|
||||
pub [u8; 20],
|
||||
);
|
||||
|
||||
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[serde(transparent)]
|
||||
pub struct InfoHash(
|
||||
#[serde(
|
||||
serialize_with = "serialize_20_bytes",
|
||||
deserialize_with = "deserialize_20_bytes"
|
||||
)]
|
||||
pub [u8; 20],
|
||||
);
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum AnnounceEvent {
|
||||
Started,
|
||||
Stopped,
|
||||
Completed,
|
||||
Empty,
|
||||
}
|
||||
|
||||
impl Default for AnnounceEvent {
|
||||
fn default() -> Self {
|
||||
Self::Empty
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for AnnounceEvent {
|
||||
type Err = String;
|
||||
|
||||
fn from_str(value: &str) -> std::result::Result<Self, String> {
|
||||
match value {
|
||||
"started" => Ok(Self::Started),
|
||||
"stopped" => Ok(Self::Stopped),
|
||||
"completed" => Ok(Self::Completed),
|
||||
"empty" => Ok(Self::Empty),
|
||||
value => Err(format!("Unknown value: {}", value)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AnnounceEvent {
|
||||
pub fn as_str(&self) -> Option<&str> {
|
||||
match self {
|
||||
Self::Started => Some("started"),
|
||||
Self::Stopped => Some("stopped"),
|
||||
Self::Completed => Some("completed"),
|
||||
Self::Empty => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl quickcheck::Arbitrary for InfoHash {
|
||||
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
|
||||
let mut arr = [b'0'; 20];
|
||||
|
||||
for byte in arr.iter_mut() {
|
||||
*byte = u8::arbitrary(g);
|
||||
}
|
||||
|
||||
Self(arr)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl quickcheck::Arbitrary for PeerId {
|
||||
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
|
||||
let mut arr = [b'0'; 20];
|
||||
|
||||
for byte in arr.iter_mut() {
|
||||
*byte = u8::arbitrary(g);
|
||||
}
|
||||
|
||||
Self(arr)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
impl quickcheck::Arbitrary for AnnounceEvent {
|
||||
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
|
||||
match (bool::arbitrary(g), bool::arbitrary(g)) {
|
||||
(false, false) => Self::Started,
|
||||
(true, false) => Self::Started,
|
||||
(false, true) => Self::Completed,
|
||||
(true, true) => Self::Empty,
|
||||
}
|
||||
}
|
||||
}
|
||||
4
crates/http_protocol/src/lib.rs
Normal file
4
crates/http_protocol/src/lib.rs
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
pub mod common;
|
||||
pub mod request;
|
||||
pub mod response;
|
||||
mod utils;
|
||||
467
crates/http_protocol/src/request.rs
Normal file
467
crates/http_protocol/src/request.rs
Normal file
|
|
@ -0,0 +1,467 @@
|
|||
use std::io::Write;
|
||||
|
||||
use anyhow::Context;
|
||||
use compact_str::CompactString;
|
||||
|
||||
use super::common::*;
|
||||
use super::utils::*;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct AnnounceRequest {
|
||||
pub info_hash: InfoHash,
|
||||
pub peer_id: PeerId,
|
||||
pub port: u16,
|
||||
pub bytes_uploaded: usize,
|
||||
pub bytes_downloaded: usize,
|
||||
pub bytes_left: usize,
|
||||
pub event: AnnounceEvent,
|
||||
/// Number of response peers wanted
|
||||
pub numwant: Option<usize>,
|
||||
pub key: Option<CompactString>,
|
||||
}
|
||||
|
||||
impl AnnounceRequest {
|
||||
fn write<W: Write>(&self, output: &mut W, url_suffix: &[u8]) -> ::std::io::Result<()> {
|
||||
output.write_all(b"GET /announce")?;
|
||||
output.write_all(url_suffix)?;
|
||||
output.write_all(b"?info_hash=")?;
|
||||
urlencode_20_bytes(self.info_hash.0, output)?;
|
||||
|
||||
output.write_all(b"&peer_id=")?;
|
||||
urlencode_20_bytes(self.peer_id.0, output)?;
|
||||
|
||||
output.write_all(b"&port=")?;
|
||||
output.write_all(itoa::Buffer::new().format(self.port).as_bytes())?;
|
||||
|
||||
output.write_all(b"&uploaded=")?;
|
||||
output.write_all(itoa::Buffer::new().format(self.bytes_uploaded).as_bytes())?;
|
||||
|
||||
output.write_all(b"&downloaded=")?;
|
||||
output.write_all(itoa::Buffer::new().format(self.bytes_downloaded).as_bytes())?;
|
||||
|
||||
output.write_all(b"&left=")?;
|
||||
output.write_all(itoa::Buffer::new().format(self.bytes_left).as_bytes())?;
|
||||
|
||||
match self.event {
|
||||
AnnounceEvent::Started => output.write_all(b"&event=started")?,
|
||||
AnnounceEvent::Stopped => output.write_all(b"&event=stopped")?,
|
||||
AnnounceEvent::Completed => output.write_all(b"&event=completed")?,
|
||||
AnnounceEvent::Empty => (),
|
||||
};
|
||||
|
||||
if let Some(numwant) = self.numwant {
|
||||
output.write_all(b"&numwant=")?;
|
||||
output.write_all(itoa::Buffer::new().format(numwant).as_bytes())?;
|
||||
}
|
||||
|
||||
if let Some(ref key) = self.key {
|
||||
output.write_all(b"&key=")?;
|
||||
output.write_all(::urlencoding::encode(key.as_str()).as_bytes())?;
|
||||
}
|
||||
|
||||
// Always ask for compact responses to ease load testing of non-aquatic trackers
|
||||
output.write_all(b"&compact=1")?;
|
||||
|
||||
output.write_all(b" HTTP/1.1\r\nHost: localhost\r\n\r\n")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn from_query_string(query_string: &str) -> anyhow::Result<Self> {
|
||||
// -- Parse key-value pairs
|
||||
|
||||
let mut opt_info_hash = None;
|
||||
let mut opt_peer_id = None;
|
||||
let mut opt_port = None;
|
||||
let mut opt_bytes_left = None;
|
||||
let mut opt_bytes_uploaded = None;
|
||||
let mut opt_bytes_downloaded = None;
|
||||
let mut event = AnnounceEvent::default();
|
||||
let mut opt_numwant = None;
|
||||
let mut opt_key = None;
|
||||
|
||||
let query_string_bytes = query_string.as_bytes();
|
||||
|
||||
let mut ampersand_iter = ::memchr::memchr_iter(b'&', query_string_bytes);
|
||||
let mut position = 0usize;
|
||||
|
||||
for equal_sign_index in ::memchr::memchr_iter(b'=', query_string_bytes) {
|
||||
let segment_end = ampersand_iter.next().unwrap_or_else(|| query_string.len());
|
||||
|
||||
let key = query_string
|
||||
.get(position..equal_sign_index)
|
||||
.with_context(|| format!("no key at {}..{}", position, equal_sign_index))?;
|
||||
let value = query_string
|
||||
.get(equal_sign_index + 1..segment_end)
|
||||
.with_context(|| {
|
||||
format!("no value at {}..{}", equal_sign_index + 1, segment_end)
|
||||
})?;
|
||||
|
||||
match key {
|
||||
"info_hash" => {
|
||||
let value = urldecode_20_bytes(value)?;
|
||||
|
||||
opt_info_hash = Some(InfoHash(value));
|
||||
}
|
||||
"peer_id" => {
|
||||
let value = urldecode_20_bytes(value)?;
|
||||
|
||||
opt_peer_id = Some(PeerId(value));
|
||||
}
|
||||
"port" => {
|
||||
opt_port = Some(value.parse::<u16>().with_context(|| "parse port")?);
|
||||
}
|
||||
"left" => {
|
||||
opt_bytes_left = Some(value.parse::<usize>().with_context(|| "parse left")?);
|
||||
}
|
||||
"uploaded" => {
|
||||
opt_bytes_uploaded =
|
||||
Some(value.parse::<usize>().with_context(|| "parse uploaded")?);
|
||||
}
|
||||
"downloaded" => {
|
||||
opt_bytes_downloaded =
|
||||
Some(value.parse::<usize>().with_context(|| "parse downloaded")?);
|
||||
}
|
||||
"event" => {
|
||||
event = value
|
||||
.parse::<AnnounceEvent>()
|
||||
.map_err(|err| anyhow::anyhow!("invalid event: {}", err))?;
|
||||
}
|
||||
"compact" => {
|
||||
if value != "1" {
|
||||
return Err(anyhow::anyhow!("compact set, but not to 1"));
|
||||
}
|
||||
}
|
||||
"numwant" => {
|
||||
opt_numwant = Some(value.parse::<usize>().with_context(|| "parse numwant")?);
|
||||
}
|
||||
"key" => {
|
||||
if value.len() > 100 {
|
||||
return Err(anyhow::anyhow!("'key' is too long"));
|
||||
}
|
||||
opt_key = Some(::urlencoding::decode(value)?.into());
|
||||
}
|
||||
k => {
|
||||
::log::debug!("ignored unrecognized key: {}", k)
|
||||
}
|
||||
}
|
||||
|
||||
if segment_end == query_string.len() {
|
||||
break;
|
||||
} else {
|
||||
position = segment_end + 1;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(AnnounceRequest {
|
||||
info_hash: opt_info_hash.with_context(|| "no info_hash")?,
|
||||
peer_id: opt_peer_id.with_context(|| "no peer_id")?,
|
||||
port: opt_port.with_context(|| "no port")?,
|
||||
bytes_uploaded: opt_bytes_uploaded.with_context(|| "no uploaded")?,
|
||||
bytes_downloaded: opt_bytes_downloaded.with_context(|| "no downloaded")?,
|
||||
bytes_left: opt_bytes_left.with_context(|| "no left")?,
|
||||
event,
|
||||
numwant: opt_numwant,
|
||||
key: opt_key,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ScrapeRequest {
|
||||
pub info_hashes: Vec<InfoHash>,
|
||||
}
|
||||
|
||||
impl ScrapeRequest {
    /// Write the request as a complete HTTP/1.1 GET request
    /// (`GET /scrape<url_suffix>?info_hash=...&info_hash=... HTTP/1.1`).
    ///
    /// Each info hash is percent-encoded byte by byte via
    /// `urlencode_20_bytes`. Used as the inverse of `from_query_string`
    /// (see the round-trip quickcheck test in this file's test module).
    fn write<W: Write>(&self, output: &mut W, url_suffix: &[u8]) -> ::std::io::Result<()> {
        output.write_all(b"GET /scrape")?;
        output.write_all(url_suffix)?;
        output.write_all(b"?")?;

        // Track whether a separator is needed before the next pair
        let mut first = true;

        for info_hash in self.info_hashes.iter() {
            if !first {
                output.write_all(b"&")?;
            }

            output.write_all(b"info_hash=")?;
            urlencode_20_bytes(info_hash.0, output)?;

            first = false;
        }

        output.write_all(b" HTTP/1.1\r\nHost: localhost\r\n\r\n")?;

        Ok(())
    }

    /// Parse a scrape request from the query-string part of the URL
    /// (everything after `?`).
    ///
    /// Pairs each `=` found by memchr with the next `&` (or end of string)
    /// to delimit key/value segments. Unrecognized keys are logged and
    /// ignored; at least one `info_hash` key is required.
    ///
    /// NOTE(review): this pairing assumes keys/values themselves contain no
    /// `=`; an extra `=` inside a value desyncs the iterators and surfaces
    /// as a range error — confirm that is acceptable for tracker input.
    pub fn from_query_string(query_string: &str) -> anyhow::Result<Self> {
        // -- Parse key-value pairs

        let mut info_hashes = Vec::new();

        let query_string_bytes = query_string.as_bytes();

        let mut ampersand_iter = ::memchr::memchr_iter(b'&', query_string_bytes);
        let mut position = 0usize;

        for equal_sign_index in ::memchr::memchr_iter(b'=', query_string_bytes) {
            // Segment ends at the next '&', or at end of string for the last pair
            let segment_end = ampersand_iter.next().unwrap_or_else(|| query_string.len());

            let key = query_string
                .get(position..equal_sign_index)
                .with_context(|| format!("no key at {}..{}", position, equal_sign_index))?;
            let value = query_string
                .get(equal_sign_index + 1..segment_end)
                .with_context(|| {
                    format!("no value at {}..{}", equal_sign_index + 1, segment_end)
                })?;

            match key {
                "info_hash" => {
                    // Percent-encoded 20-byte binary value
                    let value = urldecode_20_bytes(value)?;

                    info_hashes.push(InfoHash(value));
                }
                k => {
                    ::log::debug!("ignored unrecognized key: {}", k)
                }
            }

            if segment_end == query_string.len() {
                break;
            } else {
                position = segment_end + 1;
            }
        }

        // A scrape for zero torrents is meaningless; reject it
        if info_hashes.is_empty() {
            return Err(anyhow::anyhow!("No info hashes sent"));
        }

        Ok(ScrapeRequest { info_hashes })
    }
}
|
||||
|
||||
/// Error returned by [`Request::from_bytes`].
#[derive(Debug)]
pub enum RequestParseError {
    // The HTTP request was incomplete (httparse returned `Status::Partial`);
    // the caller should read more bytes and retry.
    NeedMoreData,
    // The request was complete but malformed.
    Invalid(anyhow::Error),
}
|
||||
|
||||
impl ::std::fmt::Display for RequestParseError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::NeedMoreData => write!(f, "Incomplete request, more data needed"),
|
||||
Self::Invalid(err) => write!(f, "Invalid request: {:#}", err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Marker impl: the Display and Debug impls above provide the required formatting.
impl ::std::error::Error for RequestParseError {}
|
||||
|
||||
/// A parsed tracker HTTP request: either an announce or a scrape.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Request {
    Announce(AnnounceRequest),
    Scrape(ScrapeRequest),
}
|
||||
|
||||
impl Request {
    /// Parse Request from HTTP request bytes
    ///
    /// Uses httparse with room for up to 16 headers. A structurally
    /// incomplete request maps to `RequestParseError::NeedMoreData` so the
    /// caller can buffer more input; any other failure is `Invalid`.
    pub fn from_bytes(bytes: &[u8]) -> Result<Self, RequestParseError> {
        let mut headers = [httparse::EMPTY_HEADER; 16];
        let mut http_request = httparse::Request::new(&mut headers);

        match http_request.parse(bytes) {
            Ok(httparse::Status::Complete(_)) => {
                if let Some(path) = http_request.path {
                    Self::from_http_get_path(path).map_err(RequestParseError::Invalid)
                } else {
                    Err(RequestParseError::Invalid(anyhow::anyhow!("no http path")))
                }
            }
            Ok(httparse::Status::Partial) => Err(RequestParseError::NeedMoreData),
            Err(err) => Err(RequestParseError::Invalid(anyhow::Error::from(err))),
        }
    }

    /// Parse Request from http GET path (`/announce?info_hash=...`)
    ///
    /// Existing serde-url decode crates were insufficient, so the decision was
    /// made to create a custom parser. serde_urlencoded doesn't support multiple
    /// values with same key, and serde_qs pulls in lots of dependencies. Both
    /// would need preprocessing for the binary format used for info_hash and
    /// peer_id.
    ///
    /// The info hashes and peer id's that are received are url-encoded byte
    /// by byte, e.g., %fa for byte 0xfa. However, they need to be parsed as
    /// UTF-8 string, meaning that non-ascii bytes are invalid characters.
    /// Therefore, these bytes must be converted to their equivalent multi-byte
    /// UTF-8 encodings.
    pub fn from_http_get_path(path: &str) -> anyhow::Result<Self> {
        ::log::debug!("request GET path: {}", path);

        // Split into location and query string at the first '?'
        let mut split_parts = path.splitn(2, '?');

        let location = split_parts.next().with_context(|| "no location")?;
        let query_string = split_parts.next().with_context(|| "no query string")?;

        if location == "/announce" {
            Ok(Request::Announce(AnnounceRequest::from_query_string(
                query_string,
            )?))
        } else if location == "/scrape" {
            Ok(Request::Scrape(ScrapeRequest::from_query_string(
                query_string,
            )?))
        } else {
            Err(anyhow::anyhow!("Path must be /announce or /scrape"))
        }
    }

    /// Write the request as a complete HTTP/1.1 GET request, delegating to
    /// the matching variant's `write`.
    pub fn write<W: Write>(&self, output: &mut W, url_suffix: &[u8]) -> ::std::io::Result<()> {
        match self {
            Self::Announce(r) => r.write(output, url_suffix),
            Self::Scrape(r) => r.write(output, url_suffix),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use quickcheck::{quickcheck, Arbitrary, Gen, TestResult};

    use super::*;

    // Hand-written reference request paths and the binary values they encode,
    // used to pin the parser against known-good inputs.
    static ANNOUNCE_REQUEST_PATH: &str = "/announce?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9&peer_id=-ABC940-5ert69muw5t8&port=12345&uploaded=1&downloaded=2&left=3&numwant=0&key=4ab4b877&compact=1&supportcrypto=1&event=started";
    static SCRAPE_REQUEST_PATH: &str =
        "/scrape?info_hash=%04%0bkV%3f%5cr%14%a6%b7%98%adC%c3%c9.%40%24%00%b9";
    static REFERENCE_INFO_HASH: [u8; 20] = [
        0x04, 0x0b, b'k', b'V', 0x3f, 0x5c, b'r', 0x14, 0xa6, 0xb7, 0x98, 0xad, b'C', 0xc3, 0xc9,
        b'.', 0x40, 0x24, 0x00, 0xb9,
    ];
    static REFERENCE_PEER_ID: [u8; 20] = [
        b'-', b'A', b'B', b'C', b'9', b'4', b'0', b'-', b'5', b'e', b'r', b't', b'6', b'9', b'm',
        b'u', b'w', b'5', b't', b'8',
    ];

    // The parsed form that ANNOUNCE_REQUEST_PATH must produce
    fn get_reference_announce_request() -> Request {
        Request::Announce(AnnounceRequest {
            info_hash: InfoHash(REFERENCE_INFO_HASH),
            peer_id: PeerId(REFERENCE_PEER_ID),
            port: 12345,
            bytes_uploaded: 1,
            bytes_downloaded: 2,
            bytes_left: 3,
            event: AnnounceEvent::Started,
            numwant: Some(0),
            key: Some("4ab4b877".into()),
        })
    }

    #[test]
    fn test_announce_request_from_bytes() {
        // Assemble a full HTTP request around the reference path
        let mut bytes = Vec::new();

        bytes.extend_from_slice(b"GET ");
        bytes.extend_from_slice(&ANNOUNCE_REQUEST_PATH.as_bytes());
        bytes.extend_from_slice(b" HTTP/1.1\r\n\r\n");

        let parsed_request = Request::from_bytes(&bytes[..]).unwrap();
        let reference_request = get_reference_announce_request();

        assert_eq!(parsed_request, reference_request);
    }

    #[test]
    fn test_scrape_request_from_bytes() {
        let mut bytes = Vec::new();

        bytes.extend_from_slice(b"GET ");
        bytes.extend_from_slice(&SCRAPE_REQUEST_PATH.as_bytes());
        bytes.extend_from_slice(b" HTTP/1.1\r\n\r\n");

        let parsed_request = Request::from_bytes(&bytes[..]).unwrap();
        let reference_request = Request::Scrape(ScrapeRequest {
            info_hashes: vec![InfoHash(REFERENCE_INFO_HASH)],
        });

        assert_eq!(parsed_request, reference_request);
    }

    // quickcheck generators for the round-trip property test below
    impl Arbitrary for AnnounceRequest {
        fn arbitrary(g: &mut Gen) -> Self {
            let key: Option<String> = Arbitrary::arbitrary(g);

            AnnounceRequest {
                info_hash: Arbitrary::arbitrary(g),
                peer_id: Arbitrary::arbitrary(g),
                port: Arbitrary::arbitrary(g),
                bytes_uploaded: Arbitrary::arbitrary(g),
                bytes_downloaded: Arbitrary::arbitrary(g),
                bytes_left: Arbitrary::arbitrary(g),
                event: Arbitrary::arbitrary(g),
                numwant: Arbitrary::arbitrary(g),
                key: key.map(|key| key.into()),
            }
        }
    }

    impl Arbitrary for ScrapeRequest {
        fn arbitrary(g: &mut Gen) -> Self {
            ScrapeRequest {
                info_hashes: Arbitrary::arbitrary(g),
            }
        }
    }

    impl Arbitrary for Request {
        fn arbitrary(g: &mut Gen) -> Self {
            if Arbitrary::arbitrary(g) {
                Self::Announce(Arbitrary::arbitrary(g))
            } else {
                Self::Scrape(Arbitrary::arbitrary(g))
            }
        }
    }

    // Property: write() followed by from_bytes() reproduces the request.
    // Over-long keys and empty scrape lists are discarded because the
    // parser deliberately rejects them.
    #[test]
    fn quickcheck_serde_identity_request() {
        fn prop(request: Request) -> TestResult {
            match request {
                Request::Announce(AnnounceRequest {
                    key: Some(ref key), ..
                }) => {
                    if key.len() > 30 {
                        return TestResult::discard();
                    }
                }
                Request::Scrape(ScrapeRequest { ref info_hashes }) => {
                    if info_hashes.is_empty() {
                        return TestResult::discard();
                    }
                }
                _ => {}
            }

            let mut bytes = Vec::new();

            request.write(&mut bytes, &[]).unwrap();

            let parsed_request = Request::from_bytes(&bytes[..]).unwrap();

            let success = request == parsed_request;

            if !success {
                println!("request: {:?}", request);
                println!("parsed request: {:?}", parsed_request);
                println!("bytes as str: {}", String::from_utf8_lossy(&bytes));
            }

            TestResult::from_bool(success)
        }

        quickcheck(prop as fn(Request) -> TestResult);
    }
}
|
||||
335
crates/http_protocol/src/response.rs
Normal file
335
crates/http_protocol/src/response.rs
Normal file
|
|
@ -0,0 +1,335 @@
|
|||
use std::borrow::Cow;
|
||||
use std::io::Write;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use super::common::*;
|
||||
use super::utils::*;
|
||||
|
||||
/// A single peer returned in an announce response.
///
/// Generic over the address type so one struct serves both the `peers`
/// (Ipv4Addr) and `peers6` (Ipv6Addr) lists.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct ResponsePeer<I: Eq> {
    pub ip_address: I,
    pub port: u16,
}
|
||||
|
||||
/// IPv4 peer list in compact form.
///
/// `#[serde(transparent)]` plus the named custom (de)serializers make this
/// (de)serialize as a single byte string of 6-byte entries (4 big-endian
/// address bytes + 2 big-endian port bytes), not as a bencoded list.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(transparent)]
pub struct ResponsePeerListV4(
    #[serde(
        serialize_with = "serialize_response_peers_ipv4",
        deserialize_with = "deserialize_response_peers_ipv4"
    )]
    pub Vec<ResponsePeer<Ipv4Addr>>,
);
|
||||
|
||||
/// IPv6 peer list in compact form.
///
/// Serialized as a single byte string of 18-byte entries (16 big-endian
/// address bytes + 2 big-endian port bytes) via the named custom
/// (de)serializers.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[serde(transparent)]
pub struct ResponsePeerListV6(
    #[serde(
        serialize_with = "serialize_response_peers_ipv6",
        deserialize_with = "deserialize_response_peers_ipv6"
    )]
    pub Vec<ResponsePeer<Ipv6Addr>>,
);
|
||||
|
||||
/// Per-torrent statistics carried in a scrape response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScrapeStatistics {
    pub complete: usize,
    pub incomplete: usize,
    // NOTE: ScrapeResponse::write always emits 0 for this field
    pub downloaded: usize,
}
|
||||
|
||||
/// Tracker announce response (a bencoded dictionary).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AnnounceResponse {
    // Serialized under the bencode key "interval"
    #[serde(rename = "interval")]
    pub announce_interval: usize,
    pub complete: usize,
    pub incomplete: usize,
    // Compact IPv4 peer list; defaults to empty when absent
    #[serde(default)]
    pub peers: ResponsePeerListV4,
    // Compact IPv6 peer list; defaults to empty when absent
    #[serde(default)]
    pub peers6: ResponsePeerListV6,
    // Serialize as string if Some, otherwise skip
    #[serde(
        rename = "warning message",
        skip_serializing_if = "Option::is_none",
        serialize_with = "serialize_optional_string"
    )]
    pub warning_message: Option<String>,
}
|
||||
|
||||
impl AnnounceResponse {
|
||||
pub fn write<W: Write>(&self, output: &mut W) -> ::std::io::Result<usize> {
|
||||
let mut bytes_written = 0usize;
|
||||
|
||||
bytes_written += output.write(b"d8:completei")?;
|
||||
bytes_written += output.write(itoa::Buffer::new().format(self.complete).as_bytes())?;
|
||||
|
||||
bytes_written += output.write(b"e10:incompletei")?;
|
||||
bytes_written += output.write(itoa::Buffer::new().format(self.incomplete).as_bytes())?;
|
||||
|
||||
bytes_written += output.write(b"e8:intervali")?;
|
||||
bytes_written += output.write(
|
||||
itoa::Buffer::new()
|
||||
.format(self.announce_interval)
|
||||
.as_bytes(),
|
||||
)?;
|
||||
|
||||
bytes_written += output.write(b"e5:peers")?;
|
||||
bytes_written += output.write(
|
||||
itoa::Buffer::new()
|
||||
.format(self.peers.0.len() * 6)
|
||||
.as_bytes(),
|
||||
)?;
|
||||
bytes_written += output.write(b":")?;
|
||||
for peer in self.peers.0.iter() {
|
||||
bytes_written += output.write(&u32::from(peer.ip_address).to_be_bytes())?;
|
||||
bytes_written += output.write(&peer.port.to_be_bytes())?;
|
||||
}
|
||||
|
||||
bytes_written += output.write(b"6:peers6")?;
|
||||
bytes_written += output.write(
|
||||
itoa::Buffer::new()
|
||||
.format(self.peers6.0.len() * 18)
|
||||
.as_bytes(),
|
||||
)?;
|
||||
bytes_written += output.write(b":")?;
|
||||
for peer in self.peers6.0.iter() {
|
||||
bytes_written += output.write(&u128::from(peer.ip_address).to_be_bytes())?;
|
||||
bytes_written += output.write(&peer.port.to_be_bytes())?;
|
||||
}
|
||||
|
||||
if let Some(ref warning_message) = self.warning_message {
|
||||
let message_bytes = warning_message.as_bytes();
|
||||
|
||||
bytes_written += output.write(b"15:warning message")?;
|
||||
bytes_written +=
|
||||
output.write(itoa::Buffer::new().format(message_bytes.len()).as_bytes())?;
|
||||
bytes_written += output.write(b":")?;
|
||||
bytes_written += output.write(message_bytes)?;
|
||||
}
|
||||
|
||||
bytes_written += output.write(b"e")?;
|
||||
|
||||
Ok(bytes_written)
|
||||
}
|
||||
}
|
||||
|
||||
/// Tracker scrape response: statistics keyed by info hash.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScrapeResponse {
    /// BTreeMap instead of HashMap since keys need to be serialized in order
    pub files: BTreeMap<InfoHash, ScrapeStatistics>,
}
|
||||
|
||||
impl ScrapeResponse {
|
||||
pub fn write<W: Write>(&self, output: &mut W) -> ::std::io::Result<usize> {
|
||||
let mut bytes_written = 0usize;
|
||||
|
||||
bytes_written += output.write(b"d5:filesd")?;
|
||||
|
||||
for (info_hash, statistics) in self.files.iter() {
|
||||
bytes_written += output.write(b"20:")?;
|
||||
bytes_written += output.write(&info_hash.0)?;
|
||||
bytes_written += output.write(b"d8:completei")?;
|
||||
bytes_written +=
|
||||
output.write(itoa::Buffer::new().format(statistics.complete).as_bytes())?;
|
||||
bytes_written += output.write(b"e10:downloadedi0e10:incompletei")?;
|
||||
bytes_written +=
|
||||
output.write(itoa::Buffer::new().format(statistics.incomplete).as_bytes())?;
|
||||
bytes_written += output.write(b"ee")?;
|
||||
}
|
||||
|
||||
bytes_written += output.write(b"ee")?;
|
||||
|
||||
Ok(bytes_written)
|
||||
}
|
||||
}
|
||||
|
||||
/// Response sent when a request cannot be served, carrying a
/// human-readable reason under the bencode key "failure reason".
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FailureResponse {
    #[serde(rename = "failure reason")]
    pub failure_reason: Cow<'static, str>,
}
|
||||
|
||||
impl FailureResponse {
|
||||
pub fn new<S: Into<Cow<'static, str>>>(reason: S) -> Self {
|
||||
Self {
|
||||
failure_reason: reason.into(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn write<W: Write>(&self, output: &mut W) -> ::std::io::Result<usize> {
|
||||
let mut bytes_written = 0usize;
|
||||
|
||||
let reason_bytes = self.failure_reason.as_bytes();
|
||||
|
||||
bytes_written += output.write(b"d14:failure reason")?;
|
||||
bytes_written += output.write(itoa::Buffer::new().format(reason_bytes.len()).as_bytes())?;
|
||||
bytes_written += output.write(b":")?;
|
||||
bytes_written += output.write(reason_bytes)?;
|
||||
bytes_written += output.write(b"e")?;
|
||||
|
||||
Ok(bytes_written)
|
||||
}
|
||||
}
|
||||
|
||||
/// Any tracker response.
///
/// `#[serde(untagged)]`: when deserializing, the variant is chosen by which
/// fields are present, not by an explicit tag.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Response {
    Announce(AnnounceResponse),
    Scrape(ScrapeResponse),
    Failure(FailureResponse),
}
|
||||
|
||||
impl Response {
    /// Bencode the response into `output` via the matching variant's
    /// hand-written writer, returning the number of bytes written.
    pub fn write<W: Write>(&self, output: &mut W) -> ::std::io::Result<usize> {
        match self {
            Response::Announce(r) => r.write(output),
            Response::Failure(r) => r.write(output),
            Response::Scrape(r) => r.write(output),
        }
    }
    /// Parse a bencoded response; the untagged serde representation picks
    /// the variant from the fields present.
    pub fn from_bytes(bytes: &[u8]) -> Result<Self, ::serde_bencode::Error> {
        ::serde_bencode::from_bytes(bytes)
    }
}
|
||||
|
||||
// quickcheck generators for the response types, used by the property tests
// in this file's test module.

#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeer<Ipv4Addr> {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            ip_address: Ipv4Addr::arbitrary(g),
            port: u16::arbitrary(g),
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeer<Ipv6Addr> {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            ip_address: Ipv6Addr::arbitrary(g),
            port: u16::arbitrary(g),
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeerListV4 {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self(Vec::arbitrary(g))
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ResponsePeerListV6 {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self(Vec::arbitrary(g))
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ScrapeStatistics {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            complete: usize::arbitrary(g),
            incomplete: usize::arbitrary(g),
            // Fixed at zero to match what ScrapeResponse::write emits
            downloaded: 0,
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for AnnounceResponse {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            announce_interval: usize::arbitrary(g),
            complete: usize::arbitrary(g),
            incomplete: usize::arbitrary(g),
            peers: ResponsePeerListV4::arbitrary(g),
            peers6: ResponsePeerListV6::arbitrary(g),
            warning_message: quickcheck::Arbitrary::arbitrary(g),
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for ScrapeResponse {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            files: BTreeMap::arbitrary(g),
        }
    }
}

#[cfg(test)]
impl quickcheck::Arbitrary for FailureResponse {
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            failure_reason: String::arbitrary(g).into(),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use quickcheck_macros::*;

    use super::*;

    // Each property checks that the hand-written bencode writer produces
    // output byte-identical to bendy's serde serialization of the same value.

    #[quickcheck]
    fn test_announce_response_to_bytes(response: AnnounceResponse) -> bool {
        let reference = bendy::serde::to_bytes(&Response::Announce(response.clone())).unwrap();

        let mut hand_written = Vec::new();

        response.write(&mut hand_written).unwrap();

        let success = hand_written == reference;

        if !success {
            println!("reference: {}", String::from_utf8_lossy(&reference));
            println!("hand_written: {}", String::from_utf8_lossy(&hand_written));
        }

        success
    }

    #[quickcheck]
    fn test_scrape_response_to_bytes(response: ScrapeResponse) -> bool {
        let reference = bendy::serde::to_bytes(&Response::Scrape(response.clone())).unwrap();

        let mut hand_written = Vec::new();

        response.write(&mut hand_written).unwrap();

        let success = hand_written == reference;

        if !success {
            println!("reference: {}", String::from_utf8_lossy(&reference));
            println!("hand_written: {}", String::from_utf8_lossy(&hand_written));
        }

        success
    }

    #[quickcheck]
    fn test_failure_response_to_bytes(response: FailureResponse) -> bool {
        let reference = bendy::serde::to_bytes(&Response::Failure(response.clone())).unwrap();

        let mut hand_written = Vec::new();

        response.write(&mut hand_written).unwrap();

        let success = hand_written == reference;

        if !success {
            println!("reference: {}", String::from_utf8_lossy(&reference));
            println!("hand_written: {}", String::from_utf8_lossy(&hand_written));
        }

        success
    }
}
|
||||
334
crates/http_protocol/src/utils.rs
Normal file
334
crates/http_protocol/src/utils.rs
Normal file
|
|
@ -0,0 +1,334 @@
|
|||
use std::io::Write;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
|
||||
use anyhow::Context;
|
||||
use serde::{de::Visitor, Deserializer, Serializer};
|
||||
|
||||
use super::response::ResponsePeer;
|
||||
|
||||
/// Percent-encode exactly 20 bytes (info hash / peer id), byte by byte.
///
/// Output is always 60 bytes: `%` followed by two lowercase hex digits
/// per input byte.
pub fn urlencode_20_bytes(input: [u8; 20], output: &mut impl Write) -> ::std::io::Result<()> {
    const HEX_DIGITS: &[u8; 16] = b"0123456789abcdef";

    // Pre-fill with '%' so each triplet only needs its two hex digits set
    let mut tmp = [b'%'; 60];

    for (i, &byte) in input.iter().enumerate() {
        tmp[i * 3 + 1] = HEX_DIGITS[usize::from(byte >> 4)];
        tmp[i * 3 + 2] = HEX_DIGITS[usize::from(byte & 0x0f)];
    }

    output.write_all(&tmp)?;

    Ok(())
}
|
||||
|
||||
pub fn urldecode_20_bytes(value: &str) -> anyhow::Result<[u8; 20]> {
|
||||
let mut out_arr = [0u8; 20];
|
||||
|
||||
let mut chars = value.chars();
|
||||
|
||||
for i in 0..20 {
|
||||
let c = chars.next().with_context(|| "less than 20 chars")?;
|
||||
|
||||
if c as u32 > 255 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"character not in single byte range: {:#?}",
|
||||
c
|
||||
));
|
||||
}
|
||||
|
||||
if c == '%' {
|
||||
let first = chars
|
||||
.next()
|
||||
.with_context(|| "missing first urldecode char in pair")?;
|
||||
let second = chars
|
||||
.next()
|
||||
.with_context(|| "missing second urldecode char in pair")?;
|
||||
|
||||
let hex = [first as u8, second as u8];
|
||||
|
||||
hex::decode_to_slice(&hex, &mut out_arr[i..i + 1])
|
||||
.map_err(|err| anyhow::anyhow!("hex decode error: {:?}", err))?;
|
||||
} else {
|
||||
out_arr[i] = c as u8;
|
||||
}
|
||||
}
|
||||
|
||||
if chars.next().is_some() {
|
||||
return Err(anyhow::anyhow!("more than 20 chars"));
|
||||
}
|
||||
|
||||
Ok(out_arr)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn serialize_optional_string<S>(v: &Option<String>, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
match v {
|
||||
Some(s) => serializer.serialize_str(s.as_str()),
|
||||
None => Err(serde::ser::Error::custom("use skip_serializing_if")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Serialize a fixed 20-byte array (info hash / peer id) as a byte string.
#[inline]
pub fn serialize_20_bytes<S>(bytes: &[u8; 20], serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    serializer.serialize_bytes(bytes)
}
|
||||
|
||||
struct TwentyByteVisitor;
|
||||
|
||||
impl<'de> Visitor<'de> for TwentyByteVisitor {
|
||||
type Value = [u8; 20];
|
||||
|
||||
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
formatter.write_str("20 bytes")
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
|
||||
where
|
||||
E: ::serde::de::Error,
|
||||
{
|
||||
if value.len() != 20 {
|
||||
return Err(::serde::de::Error::custom("not 20 bytes"));
|
||||
}
|
||||
|
||||
let mut arr = [0u8; 20];
|
||||
|
||||
arr.copy_from_slice(value);
|
||||
|
||||
Ok(arr)
|
||||
}
|
||||
}
|
||||
|
||||
/// Deserialize exactly 20 bytes via `TwentyByteVisitor`.
///
/// Uses `deserialize_any` so the format drives the representation; only
/// byte strings are accepted (the visitor implements `visit_bytes` only).
#[inline]
pub fn deserialize_20_bytes<'de, D>(deserializer: D) -> Result<[u8; 20], D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(TwentyByteVisitor)
}
|
||||
|
||||
pub fn serialize_response_peers_ipv4<S>(
|
||||
response_peers: &[ResponsePeer<Ipv4Addr>],
|
||||
serializer: S,
|
||||
) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut bytes = Vec::with_capacity(response_peers.len() * 6);
|
||||
|
||||
for peer in response_peers {
|
||||
bytes.extend_from_slice(&u32::from(peer.ip_address).to_be_bytes());
|
||||
bytes.extend_from_slice(&peer.port.to_be_bytes())
|
||||
}
|
||||
|
||||
serializer.serialize_bytes(&bytes)
|
||||
}
|
||||
|
||||
pub fn serialize_response_peers_ipv6<S>(
|
||||
response_peers: &[ResponsePeer<Ipv6Addr>],
|
||||
serializer: S,
|
||||
) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut bytes = Vec::with_capacity(response_peers.len() * 6);
|
||||
|
||||
for peer in response_peers {
|
||||
bytes.extend_from_slice(&u128::from(peer.ip_address).to_be_bytes());
|
||||
bytes.extend_from_slice(&peer.port.to_be_bytes())
|
||||
}
|
||||
|
||||
serializer.serialize_bytes(&bytes)
|
||||
}
|
||||
|
||||
/// serde visitor decoding a compact IPv4 peer byte string: 6-byte entries
/// (4 big-endian address bytes + 2 big-endian port bytes).
struct ResponsePeersIpv4Visitor;

impl<'de> Visitor<'de> for ResponsePeersIpv4Visitor {
    type Value = Vec<ResponsePeer<Ipv4Addr>>;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("byte-encoded ipv4 address-port pairs")
    }

    #[inline]
    fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
    where
        E: ::serde::de::Error,
    {
        // chunks_exact guarantees each chunk is exactly 6 bytes; a non-empty
        // remainder means the input length is not a multiple of 6
        let chunks = value.chunks_exact(6);

        if !chunks.remainder().is_empty() {
            return Err(::serde::de::Error::custom("trailing bytes"));
        }

        // Scratch buffers reused across iterations
        let mut ip_bytes = [0u8; 4];
        let mut port_bytes = [0u8; 2];

        let peers = chunks
            .into_iter()
            .map(|chunk| {
                ip_bytes.copy_from_slice(&chunk[0..4]);
                port_bytes.copy_from_slice(&chunk[4..6]);

                ResponsePeer {
                    ip_address: Ipv4Addr::from(u32::from_be_bytes(ip_bytes)),
                    port: u16::from_be_bytes(port_bytes),
                }
            })
            .collect();

        Ok(peers)
    }
}
|
||||
|
||||
/// Deserialize a compact IPv4 peer list via `ResponsePeersIpv4Visitor`.
#[inline]
pub fn deserialize_response_peers_ipv4<'de, D>(
    deserializer: D,
) -> Result<Vec<ResponsePeer<Ipv4Addr>>, D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(ResponsePeersIpv4Visitor)
}
|
||||
|
||||
/// serde visitor decoding a compact IPv6 peer byte string: 18-byte entries
/// (16 big-endian address bytes + 2 big-endian port bytes).
struct ResponsePeersIpv6Visitor;

impl<'de> Visitor<'de> for ResponsePeersIpv6Visitor {
    type Value = Vec<ResponsePeer<Ipv6Addr>>;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("byte-encoded ipv6 address-port pairs")
    }

    #[inline]
    fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
    where
        E: ::serde::de::Error,
    {
        // chunks_exact guarantees each chunk is exactly 18 bytes; a non-empty
        // remainder means the input length is not a multiple of 18
        let chunks = value.chunks_exact(18);

        if !chunks.remainder().is_empty() {
            return Err(::serde::de::Error::custom("trailing bytes"));
        }

        // Scratch buffers reused across iterations
        let mut ip_bytes = [0u8; 16];
        let mut port_bytes = [0u8; 2];

        let peers = chunks
            .into_iter()
            .map(|chunk| {
                ip_bytes.copy_from_slice(&chunk[0..16]);
                port_bytes.copy_from_slice(&chunk[16..18]);

                ResponsePeer {
                    ip_address: Ipv6Addr::from(u128::from_be_bytes(ip_bytes)),
                    port: u16::from_be_bytes(port_bytes),
                }
            })
            .collect();

        Ok(peers)
    }
}
|
||||
|
||||
/// Deserialize a compact IPv6 peer list via `ResponsePeersIpv6Visitor`.
#[inline]
pub fn deserialize_response_peers_ipv6<'de, D>(
    deserializer: D,
) -> Result<Vec<ResponsePeer<Ipv6Addr>>, D::Error>
where
    D: Deserializer<'de>,
{
    deserializer.deserialize_any(ResponsePeersIpv6Visitor)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use quickcheck_macros::*;

    use crate::common::InfoHash;

    use super::*;

    // Encodes bytes 0..=9 repeating, so every triplet is "%0<digit>"
    #[test]
    fn test_urlencode_20_bytes() {
        let mut input = [0u8; 20];

        for (i, b) in input.iter_mut().enumerate() {
            *b = i as u8 % 10;
        }

        let mut output = Vec::new();

        urlencode_20_bytes(input, &mut output).unwrap();

        assert_eq!(output.len(), 60);

        for (i, chunk) in output.chunks_exact(3).enumerate() {
            // Not perfect but should do the job
            let reference = [b'%', b'0', input[i] + 48];

            let success = chunk == reference;

            if !success {
                println!("failing index: {}", i);
            }

            assert_eq!(chunk, reference);
        }
    }

    // Round-trip property: decode(encode(x)) == x for arbitrary byte values
    #[quickcheck]
    fn test_urlencode_urldecode_20_bytes(
        a: u8,
        b: u8,
        c: u8,
        d: u8,
        e: u8,
        f: u8,
        g: u8,
        h: u8,
    ) -> bool {
        let input: [u8; 20] = [a, b, c, d, e, f, g, h, b, c, d, a, e, f, g, h, a, b, d, c];

        let mut output = Vec::new();

        urlencode_20_bytes(input, &mut output).unwrap();

        let s = ::std::str::from_utf8(&output).unwrap();

        let decoded = urldecode_20_bytes(s).unwrap();

        assert_eq!(input, decoded);

        input == decoded
    }

    // serde round-trips through bendy for the compact peer representations
    #[quickcheck]
    fn test_serde_response_peers_ipv4(peers: Vec<ResponsePeer<Ipv4Addr>>) -> bool {
        let serialized = bendy::serde::to_bytes(&peers).unwrap();
        let deserialized: Vec<ResponsePeer<Ipv4Addr>> =
            ::bendy::serde::from_bytes(&serialized).unwrap();

        peers == deserialized
    }

    #[quickcheck]
    fn test_serde_response_peers_ipv6(peers: Vec<ResponsePeer<Ipv6Addr>>) -> bool {
        let serialized = bendy::serde::to_bytes(&peers).unwrap();
        let deserialized: Vec<ResponsePeer<Ipv6Addr>> =
            ::bendy::serde::from_bytes(&serialized).unwrap();

        peers == deserialized
    }

    #[quickcheck]
    fn test_serde_info_hash(info_hash: InfoHash) -> bool {
        let serialized = bendy::serde::to_bytes(&info_hash).unwrap();
        let deserialized: InfoHash = ::bendy::serde::from_bytes(&serialized).unwrap();

        info_hash == deserialized
    }
}
|
||||
|
|
@ -0,0 +1 @@
|
|||
{"group_id":"announce-response-to-bytes","function_id":null,"value_str":null,"throughput":null,"full_id":"announce-response-to-bytes","directory_name":"announce-response-to-bytes","title":"announce-response-to-bytes"}
|
||||
|
|
@ -0,0 +1 @@
|
|||
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":6033.211414448978,"upper_bound":6077.812796004471},"point_estimate":6054.625623439862,"standard_error":11.387162302248655},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":5978.799232230455,"upper_bound":6005.189535363421},"point_estimate":5992.745967541798,"standard_error":6.185398365563177},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":157.08470879401094,"upper_bound":190.1634482791119},"point_estimate":175.51713287349847,"standard_error":8.3821979113297},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":6052.909623777413,"upper_bound":6106.324900686703},"point_estimate":6078.257114077077,"standard_error":13.648790489926581},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":285.8045348063516,"upper_bound":442.7497149360172},"point_estimate":363.44843558752416,"standard_error":40.16921333191484}}
|
||||
File diff suppressed because it is too large
Load diff
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1 @@
|
|||
[5184.137608004838,5534.60305616611,6469.177584596171,6819.643032757444]
|
||||
|
|
@ -0,0 +1 @@
|
|||
{"group_id":"announce-response-to-bytes","function_id":null,"value_str":null,"throughput":null,"full_id":"announce-response-to-bytes","directory_name":"announce-response-to-bytes","title":"announce-response-to-bytes"}
|
||||
|
|
@ -0,0 +1 @@
|
|||
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":816.5793263757998,"upper_bound":829.8277072322014},"point_estimate":823.0324170546021,"standard_error":3.3713205895235987},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":785.8508214740125,"upper_bound":790.3983678702459},"point_estimate":787.3168084640594,"standard_error":1.2374611050301572},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":34.7791109454705,"upper_bound":44.243901222281416},"point_estimate":40.0754205033,"standard_error":2.42022909705503},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":811.6440256190905,"upper_bound":823.2086243755138},"point_estimate":817.2846212085899,"standard_error":2.95472132616886},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":92.90279248590167,"upper_bound":121.73387529852707},"point_estimate":107.2944955313405,"standard_error":7.401429548815175}}
|
||||
File diff suppressed because it is too large
Load diff
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1 @@
|
|||
[565.2398956433274,665.7749574634894,933.8684556505881,1034.40351747075]
|
||||
|
|
@ -0,0 +1 @@
|
|||
{"group_id":"request-from-bytes","function_id":null,"value_str":null,"throughput":null,"full_id":"request-from-bytes","directory_name":"request-from-bytes","title":"request-from-bytes"}
|
||||
|
|
@ -0,0 +1 @@
|
|||
{"mean":{"confidence_interval":{"confidence_level":0.95,"lower_bound":791.6783637138329,"upper_bound":798.2060382161882},"point_estimate":794.7777653239414,"standard_error":1.670679553768017},"median":{"confidence_interval":{"confidence_level":0.95,"lower_bound":786.1377247215969,"upper_bound":789.3747173913043},"point_estimate":788.2154281612928,"standard_error":0.9080984924572599},"median_abs_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":34.47577000388577,"upper_bound":38.99231743541378},"point_estimate":37.25560574108035,"standard_error":1.1689453074940308},"slope":{"confidence_interval":{"confidence_level":0.95,"lower_bound":791.1964524096214,"upper_bound":798.189227060581},"point_estimate":794.5503586699593,"standard_error":1.785366051793957},"std_dev":{"confidence_interval":{"confidence_level":0.95,"lower_bound":41.22148757811178,"upper_bound":64.85026519223337},"point_estimate":52.942361554527636,"standard_error":6.055601310575156}}
|
||||
File diff suppressed because it is too large
Load diff
File diff suppressed because one or more lines are too long
|
|
@ -0,0 +1 @@
|
|||
[635.6000013134935,698.449239826088,866.0472091930068,928.8964477056013]
|
||||
23
crates/peer_id/Cargo.toml
Normal file
23
crates/peer_id/Cargo.toml
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
[package]
|
||||
name = "aquatic_peer_id"
|
||||
description = "BitTorrent peer ID handling"
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "aquatic_peer_id"
|
||||
|
||||
[features]
|
||||
default = ["quickcheck"]
|
||||
|
||||
[dependencies]
|
||||
compact_str = "0.7"
|
||||
hex = "0.4"
|
||||
regex = "1"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
quickcheck = { version = "1", optional = true }
|
||||
277
crates/peer_id/src/lib.rs
Normal file
277
crates/peer_id/src/lib.rs
Normal file
|
|
@ -0,0 +1,277 @@
|
|||
use std::{borrow::Cow, fmt::Display, sync::OnceLock};
|
||||
|
||||
use compact_str::{format_compact, CompactString};
|
||||
use regex::bytes::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
pub struct PeerId(pub [u8; 20]);
|
||||
|
||||
impl PeerId {
|
||||
pub fn client(&self) -> PeerClient {
|
||||
PeerClient::from_peer_id(self)
|
||||
}
|
||||
pub fn first_8_bytes_hex(&self) -> CompactString {
|
||||
let mut buf = [0u8; 16];
|
||||
|
||||
hex::encode_to_slice(&self.0[..8], &mut buf)
|
||||
.expect("PeerId.first_8_bytes_hex buffer too small");
|
||||
|
||||
CompactString::from_utf8_lossy(&buf)
|
||||
}
|
||||
}
|
||||
|
||||
#[non_exhaustive]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub enum PeerClient {
|
||||
BitTorrent(CompactString),
|
||||
Deluge(CompactString),
|
||||
LibTorrentRakshasa(CompactString),
|
||||
LibTorrentRasterbar(CompactString),
|
||||
QBitTorrent(CompactString),
|
||||
Transmission(CompactString),
|
||||
UTorrent(CompactString),
|
||||
UTorrentEmbedded(CompactString),
|
||||
UTorrentMac(CompactString),
|
||||
UTorrentWeb(CompactString),
|
||||
Vuze(CompactString),
|
||||
WebTorrent(CompactString),
|
||||
WebTorrentDesktop(CompactString),
|
||||
Mainline(CompactString),
|
||||
OtherWithPrefixAndVersion {
|
||||
prefix: CompactString,
|
||||
version: CompactString,
|
||||
},
|
||||
OtherWithPrefix(CompactString),
|
||||
Other,
|
||||
}
|
||||
|
||||
impl PeerClient {
|
||||
pub fn from_prefix_and_version(prefix: &[u8], version: &[u8]) -> Self {
|
||||
fn three_digits_plus_prerelease(v1: char, v2: char, v3: char, v4: char) -> CompactString {
|
||||
let prerelease: Cow<str> = match v4 {
|
||||
'd' | 'D' => " dev".into(),
|
||||
'a' | 'A' => " alpha".into(),
|
||||
'b' | 'B' => " beta".into(),
|
||||
'r' | 'R' => " rc".into(),
|
||||
's' | 'S' => " stable".into(),
|
||||
other => format_compact!("{}", other).into(),
|
||||
};
|
||||
|
||||
format_compact!("{}.{}.{}{}", v1, v2, v3, prerelease)
|
||||
}
|
||||
|
||||
fn webtorrent(v1: char, v2: char, v3: char, v4: char) -> CompactString {
|
||||
let major = if v1 == '0' {
|
||||
format_compact!("{}", v2)
|
||||
} else {
|
||||
format_compact!("{}{}", v1, v2)
|
||||
};
|
||||
|
||||
let minor = if v3 == '0' {
|
||||
format_compact!("{}", v4)
|
||||
} else {
|
||||
format_compact!("{}{}", v3, v4)
|
||||
};
|
||||
|
||||
format_compact!("{}.{}", major, minor)
|
||||
}
|
||||
|
||||
if let [v1, v2, v3, v4] = version {
|
||||
let (v1, v2, v3, v4) = (*v1 as char, *v2 as char, *v3 as char, *v4 as char);
|
||||
|
||||
match prefix {
|
||||
b"AZ" => Self::Vuze(format_compact!("{}.{}.{}.{}", v1, v2, v3, v4)),
|
||||
b"BT" => Self::BitTorrent(three_digits_plus_prerelease(v1, v2, v3, v4)),
|
||||
b"DE" => Self::Deluge(three_digits_plus_prerelease(v1, v2, v3, v4)),
|
||||
b"lt" => Self::LibTorrentRakshasa(format_compact!("{}.{}{}.{}", v1, v2, v3, v4)),
|
||||
b"LT" => Self::LibTorrentRasterbar(format_compact!("{}.{}{}.{}", v1, v2, v3, v4)),
|
||||
b"qB" => Self::QBitTorrent(format_compact!("{}.{}.{}", v1, v2, v3)),
|
||||
b"TR" => {
|
||||
let v = match (v1, v2, v3, v4) {
|
||||
('0', '0', '0', v4) => format_compact!("0.{}", v4),
|
||||
('0', '0', v3, v4) => format_compact!("0.{}{}", v3, v4),
|
||||
_ => format_compact!("{}.{}{}", v1, v2, v3),
|
||||
};
|
||||
|
||||
Self::Transmission(v)
|
||||
}
|
||||
b"UE" => Self::UTorrentEmbedded(three_digits_plus_prerelease(v1, v2, v3, v4)),
|
||||
b"UM" => Self::UTorrentMac(three_digits_plus_prerelease(v1, v2, v3, v4)),
|
||||
b"UT" => Self::UTorrent(three_digits_plus_prerelease(v1, v2, v3, v4)),
|
||||
b"UW" => Self::UTorrentWeb(three_digits_plus_prerelease(v1, v2, v3, v4)),
|
||||
b"WD" => Self::WebTorrentDesktop(webtorrent(v1, v2, v3, v4)),
|
||||
b"WW" => Self::WebTorrent(webtorrent(v1, v2, v3, v4)),
|
||||
_ => Self::OtherWithPrefixAndVersion {
|
||||
prefix: CompactString::from_utf8_lossy(prefix),
|
||||
version: CompactString::from_utf8_lossy(version),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
match (prefix, version) {
|
||||
(b"M", &[major, b'-', minor, b'-', patch, b'-']) => Self::Mainline(
|
||||
format_compact!("{}.{}.{}", major as char, minor as char, patch as char),
|
||||
),
|
||||
(b"M", &[major, b'-', minor1, minor2, b'-', patch]) => {
|
||||
Self::Mainline(format_compact!(
|
||||
"{}.{}{}.{}",
|
||||
major as char,
|
||||
minor1 as char,
|
||||
minor2 as char,
|
||||
patch as char
|
||||
))
|
||||
}
|
||||
_ => Self::OtherWithPrefixAndVersion {
|
||||
prefix: CompactString::from_utf8_lossy(prefix),
|
||||
version: CompactString::from_utf8_lossy(version),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_peer_id(peer_id: &PeerId) -> Self {
|
||||
static AZ_RE: OnceLock<Regex> = OnceLock::new();
|
||||
|
||||
if let Some(caps) = AZ_RE
|
||||
.get_or_init(|| {
|
||||
Regex::new(r"^\-(?P<name>[a-zA-Z]{2})(?P<version>[0-9]{3}[0-9a-zA-Z])")
|
||||
.expect("compile AZ_RE regex")
|
||||
})
|
||||
.captures(&peer_id.0)
|
||||
{
|
||||
return Self::from_prefix_and_version(&caps["name"], &caps["version"]);
|
||||
}
|
||||
|
||||
static MAINLINE_RE: OnceLock<Regex> = OnceLock::new();
|
||||
|
||||
if let Some(caps) = MAINLINE_RE
|
||||
.get_or_init(|| {
|
||||
Regex::new(r"^(?P<name>[a-zA-Z])(?P<version>[0-9\-]{6})\-")
|
||||
.expect("compile MAINLINE_RE regex")
|
||||
})
|
||||
.captures(&peer_id.0)
|
||||
{
|
||||
return Self::from_prefix_and_version(&caps["name"], &caps["version"]);
|
||||
}
|
||||
|
||||
static PREFIX_RE: OnceLock<Regex> = OnceLock::new();
|
||||
|
||||
if let Some(caps) = PREFIX_RE
|
||||
.get_or_init(|| {
|
||||
Regex::new(r"^(?P<prefix>[a-zA-Z0-9\-]+)\-").expect("compile PREFIX_RE regex")
|
||||
})
|
||||
.captures(&peer_id.0)
|
||||
{
|
||||
return Self::OtherWithPrefix(CompactString::from_utf8_lossy(&caps["prefix"]));
|
||||
}
|
||||
|
||||
Self::Other
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for PeerClient {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::BitTorrent(v) => write!(f, "BitTorrent {}", v.as_str()),
|
||||
Self::Deluge(v) => write!(f, "Deluge {}", v.as_str()),
|
||||
Self::LibTorrentRakshasa(v) => write!(f, "lt (rakshasa) {}", v.as_str()),
|
||||
Self::LibTorrentRasterbar(v) => write!(f, "lt (rasterbar) {}", v.as_str()),
|
||||
Self::QBitTorrent(v) => write!(f, "QBitTorrent {}", v.as_str()),
|
||||
Self::Transmission(v) => write!(f, "Transmission {}", v.as_str()),
|
||||
Self::UTorrent(v) => write!(f, "µTorrent {}", v.as_str()),
|
||||
Self::UTorrentEmbedded(v) => write!(f, "µTorrent Emb. {}", v.as_str()),
|
||||
Self::UTorrentMac(v) => write!(f, "µTorrent Mac {}", v.as_str()),
|
||||
Self::UTorrentWeb(v) => write!(f, "µTorrent Web {}", v.as_str()),
|
||||
Self::Vuze(v) => write!(f, "Vuze {}", v.as_str()),
|
||||
Self::WebTorrent(v) => write!(f, "WebTorrent {}", v.as_str()),
|
||||
Self::WebTorrentDesktop(v) => write!(f, "WebTorrent Desktop {}", v.as_str()),
|
||||
Self::Mainline(v) => write!(f, "Mainline {}", v.as_str()),
|
||||
Self::OtherWithPrefixAndVersion { prefix, version } => {
|
||||
write!(f, "Other ({}) ({})", prefix.as_str(), version.as_str())
|
||||
}
|
||||
Self::OtherWithPrefix(prefix) => write!(f, "Other ({})", prefix.as_str()),
|
||||
Self::Other => f.write_str("Other"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "quickcheck")]
|
||||
impl quickcheck::Arbitrary for PeerId {
|
||||
fn arbitrary(g: &mut quickcheck::Gen) -> Self {
|
||||
let mut bytes = [0u8; 20];
|
||||
|
||||
for byte in bytes.iter_mut() {
|
||||
*byte = u8::arbitrary(g);
|
||||
}
|
||||
|
||||
Self(bytes)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "quickcheck")]
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn create_peer_id(bytes: &[u8]) -> PeerId {
|
||||
let mut peer_id = PeerId([0; 20]);
|
||||
|
||||
let len = bytes.len();
|
||||
|
||||
(&mut peer_id.0[..len]).copy_from_slice(bytes);
|
||||
|
||||
peer_id
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_client_from_peer_id() {
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-lt1234-k/asdh3")),
|
||||
PeerClient::LibTorrentRakshasa("1.23.4".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-DE123s-k/asdh3")),
|
||||
PeerClient::Deluge("1.2.3 stable".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-DE123r-k/asdh3")),
|
||||
PeerClient::Deluge("1.2.3 rc".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-UT123A-k/asdh3")),
|
||||
PeerClient::UTorrent("1.2.3 alpha".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-TR0012-k/asdh3")),
|
||||
PeerClient::Transmission("0.12".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-TR1212-k/asdh3")),
|
||||
PeerClient::Transmission("1.21".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-WW0102-k/asdh3")),
|
||||
PeerClient::WebTorrent("1.2".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-WW1302-k/asdh3")),
|
||||
PeerClient::WebTorrent("13.2".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"-WW1324-k/asdh3")),
|
||||
PeerClient::WebTorrent("13.24".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"M1-2-3--k/asdh3")),
|
||||
PeerClient::Mainline("1.2.3".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"M1-23-4-k/asdh3")),
|
||||
PeerClient::Mainline("1.23.4".into())
|
||||
);
|
||||
assert_eq!(
|
||||
PeerClient::from_peer_id(&create_peer_id(b"S3-k/asdh3")),
|
||||
PeerClient::OtherWithPrefix("S3".into())
|
||||
);
|
||||
}
|
||||
}
|
||||
23
crates/toml_config/Cargo.toml
Normal file
23
crates/toml_config/Cargo.toml
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
[package]
|
||||
name = "aquatic_toml_config"
|
||||
description = "Serialize toml with comments"
|
||||
keywords = ["toml"]
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "aquatic_toml_config"
|
||||
|
||||
[dependencies]
|
||||
toml = "0.5"
|
||||
aquatic_toml_config_derive.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
quickcheck = "1"
|
||||
quickcheck_macros = "1"
|
||||
126
crates/toml_config/src/lib.rs
Normal file
126
crates/toml_config/src/lib.rs
Normal file
|
|
@ -0,0 +1,126 @@
|
|||
pub use aquatic_toml_config_derive::TomlConfig;
|
||||
pub use toml;
|
||||
|
||||
/// Run this on your struct implementing TomlConfig to generate a
|
||||
/// serialization/deserialization test for it.
|
||||
#[macro_export]
|
||||
macro_rules! gen_serialize_deserialize_test {
|
||||
($ident:ident) => {
|
||||
#[test]
|
||||
fn test_cargo_toml_serialize_deserialize() {
|
||||
use ::aquatic_toml_config::TomlConfig;
|
||||
let serialized = $ident::default_to_string();
|
||||
let deserialized = ::aquatic_toml_config::toml::de::from_str(&serialized).unwrap();
|
||||
|
||||
assert_eq!($ident::default(), deserialized);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Export structs to toml, converting Rust doc strings to comments.
|
||||
///
|
||||
/// Supports one level of nesting. Fields containing structs must come
|
||||
/// after regular fields.
|
||||
///
|
||||
/// Usage:
|
||||
/// ```
|
||||
/// use aquatic_toml_config::TomlConfig;
|
||||
///
|
||||
/// #[derive(TomlConfig)]
|
||||
/// struct SubConfig {
|
||||
/// /// A
|
||||
/// a: usize,
|
||||
/// /// B
|
||||
/// b: String,
|
||||
/// }
|
||||
///
|
||||
/// impl Default for SubConfig {
|
||||
/// fn default() -> Self {
|
||||
/// Self {
|
||||
/// a: 200,
|
||||
/// b: "subconfig hello".into(),
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// #[derive(TomlConfig)]
|
||||
/// struct Config {
|
||||
/// /// A
|
||||
/// a: usize,
|
||||
/// /// B
|
||||
/// b: String,
|
||||
/// /// C
|
||||
/// c: SubConfig,
|
||||
/// }
|
||||
///
|
||||
/// impl Default for Config {
|
||||
/// fn default() -> Self {
|
||||
/// Self {
|
||||
/// a: 100,
|
||||
/// b: "hello".into(),
|
||||
/// c: Default::default(),
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// let expected = "# A\na = 100\n# B\nb = \"hello\"\n\n# C\n[c]\n# A\na = 200\n# B\nb = \"subconfig hello\"\n";
|
||||
///
|
||||
/// assert_eq!(
|
||||
/// Config::default_to_string(),
|
||||
/// expected,
|
||||
/// );
|
||||
/// ```
|
||||
pub trait TomlConfig: Default {
|
||||
fn default_to_string() -> String;
|
||||
}
|
||||
|
||||
pub mod __private {
|
||||
use std::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
pub trait Private {
|
||||
fn __to_string(&self, comment: Option<String>, field_name: String) -> String;
|
||||
}
|
||||
|
||||
macro_rules! impl_trait {
|
||||
($ident:ident) => {
|
||||
impl Private for $ident {
|
||||
fn __to_string(&self, comment: Option<String>, field_name: String) -> String {
|
||||
let mut output = String::new();
|
||||
|
||||
if let Some(comment) = comment {
|
||||
output.push_str(&comment);
|
||||
}
|
||||
|
||||
let value = crate::toml::ser::to_string(self).unwrap();
|
||||
|
||||
output.push_str(&format!("{} = {}\n", field_name, value));
|
||||
|
||||
output
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
impl_trait!(isize);
|
||||
impl_trait!(i8);
|
||||
impl_trait!(i16);
|
||||
impl_trait!(i32);
|
||||
impl_trait!(i64);
|
||||
|
||||
impl_trait!(usize);
|
||||
impl_trait!(u8);
|
||||
impl_trait!(u16);
|
||||
impl_trait!(u32);
|
||||
impl_trait!(u64);
|
||||
|
||||
impl_trait!(f32);
|
||||
impl_trait!(f64);
|
||||
|
||||
impl_trait!(bool);
|
||||
|
||||
impl_trait!(String);
|
||||
|
||||
impl_trait!(PathBuf);
|
||||
impl_trait!(SocketAddr);
|
||||
}
|
||||
46
crates/toml_config/tests/test.rs
Normal file
46
crates/toml_config/tests/test.rs
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
use serde::Deserialize;
|
||||
|
||||
use aquatic_toml_config::{gen_serialize_deserialize_test, TomlConfig};
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, TomlConfig, Deserialize)]
|
||||
struct TestConfigInnerA {
|
||||
/// Comment for a
|
||||
a: String,
|
||||
/// Comment for b
|
||||
b: usize,
|
||||
}
|
||||
|
||||
impl Default for TestConfigInnerA {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
a: "Inner hello world".into(),
|
||||
b: 100,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Comment for TestConfig
|
||||
#[derive(Clone, Debug, PartialEq, Eq, TomlConfig, Deserialize)]
|
||||
struct TestConfig {
|
||||
/// Comment for a that stretches over
|
||||
/// multiple lines
|
||||
a: String,
|
||||
/// Comment for b
|
||||
b: usize,
|
||||
c: bool,
|
||||
/// Comment for TestConfigInnerA
|
||||
inner_a: TestConfigInnerA,
|
||||
}
|
||||
|
||||
impl Default for TestConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
a: "Hello, world!".into(),
|
||||
b: 100,
|
||||
c: true,
|
||||
inner_a: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
gen_serialize_deserialize_test!(TestConfig);
|
||||
20
crates/toml_config_derive/Cargo.toml
Normal file
20
crates/toml_config_derive/Cargo.toml
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
[package]
|
||||
name = "aquatic_toml_config_derive"
|
||||
description = "Serialize toml with comments"
|
||||
exclude = ["target"]
|
||||
keywords = ["toml"]
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
proc-macro2 = "1"
|
||||
quote = "1"
|
||||
syn = "1"
|
||||
177
crates/toml_config_derive/src/lib.rs
Normal file
177
crates/toml_config_derive/src/lib.rs
Normal file
|
|
@ -0,0 +1,177 @@
|
|||
use proc_macro2::{TokenStream, TokenTree};
|
||||
use quote::quote;
|
||||
use syn::{parse_macro_input, Attribute, Data, DataStruct, DeriveInput, Fields, Ident, Type};
|
||||
|
||||
#[proc_macro_derive(TomlConfig)]
|
||||
pub fn derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
|
||||
let input = parse_macro_input!(input as DeriveInput);
|
||||
|
||||
let comment = extract_comment_string(input.attrs);
|
||||
let ident = input.ident;
|
||||
|
||||
match input.data {
|
||||
Data::Struct(struct_data) => {
|
||||
let mut output_stream = quote! {
|
||||
let mut output = String::new();
|
||||
};
|
||||
|
||||
extract_from_struct(ident.clone(), struct_data, &mut output_stream);
|
||||
|
||||
proc_macro::TokenStream::from(quote! {
|
||||
impl ::aquatic_toml_config::TomlConfig for #ident {
|
||||
fn default_to_string() -> String {
|
||||
let mut output = String::new();
|
||||
|
||||
let comment: Option<String> = #comment;
|
||||
|
||||
if let Some(comment) = comment {
|
||||
output.push_str(&comment);
|
||||
output.push('\n');
|
||||
}
|
||||
|
||||
let body = {
|
||||
#output_stream
|
||||
|
||||
output
|
||||
};
|
||||
|
||||
output.push_str(&body);
|
||||
|
||||
output
|
||||
}
|
||||
}
|
||||
impl ::aquatic_toml_config::__private::Private for #ident {
|
||||
fn __to_string(&self, comment: Option<String>, field_name: String) -> String {
|
||||
let mut output = String::new();
|
||||
|
||||
output.push('\n');
|
||||
|
||||
if let Some(comment) = comment {
|
||||
output.push_str(&comment);
|
||||
}
|
||||
output.push_str(&format!("[{}]\n", field_name));
|
||||
|
||||
let body = {
|
||||
#output_stream
|
||||
|
||||
output
|
||||
};
|
||||
|
||||
output.push_str(&body);
|
||||
|
||||
output
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
Data::Enum(_) => proc_macro::TokenStream::from(quote! {
|
||||
impl ::aquatic_toml_config::__private::Private for #ident {
|
||||
fn __to_string(&self, comment: Option<String>, field_name: String) -> String {
|
||||
let mut output = String::new();
|
||||
let wrapping_comment: Option<String> = #comment;
|
||||
|
||||
if let Some(comment) = wrapping_comment {
|
||||
output.push_str(&comment);
|
||||
}
|
||||
|
||||
if let Some(comment) = comment {
|
||||
output.push_str(&comment);
|
||||
}
|
||||
|
||||
let value = match ::aquatic_toml_config::toml::ser::to_string(self) {
|
||||
Ok(value) => value,
|
||||
Err(err) => panic!("Couldn't serialize enum to toml: {:#}", err),
|
||||
};
|
||||
|
||||
output.push_str(&format!("{} = {}\n", field_name, value));
|
||||
|
||||
output
|
||||
}
|
||||
}
|
||||
}),
|
||||
Data::Union(_) => panic!("Unions are not supported"),
|
||||
}
|
||||
}
|
||||
|
||||
fn extract_from_struct(
|
||||
struct_ty_ident: Ident,
|
||||
struct_data: DataStruct,
|
||||
output_stream: &mut TokenStream,
|
||||
) {
|
||||
let fields = if let Fields::Named(fields) = struct_data.fields {
|
||||
fields
|
||||
} else {
|
||||
panic!("Fields are not named");
|
||||
};
|
||||
|
||||
output_stream.extend(::std::iter::once(quote! {
|
||||
let struct_default = #struct_ty_ident::default();
|
||||
}));
|
||||
|
||||
for field in fields.named.into_iter() {
|
||||
let ident = field.ident.expect("Encountered unnamed field");
|
||||
let ident_string = format!("{}", ident);
|
||||
let comment = extract_comment_string(field.attrs);
|
||||
|
||||
if let Type::Path(path) = field.ty {
|
||||
output_stream.extend(::std::iter::once(quote! {
|
||||
{
|
||||
let comment: Option<String> = #comment;
|
||||
let field_default: #path = struct_default.#ident;
|
||||
|
||||
let s: String = ::aquatic_toml_config::__private::Private::__to_string(
|
||||
&field_default,
|
||||
comment,
|
||||
#ident_string.to_string()
|
||||
);
|
||||
output.push_str(&s);
|
||||
}
|
||||
}));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn extract_comment_string(attrs: Vec<Attribute>) -> TokenStream {
|
||||
let mut output = String::new();
|
||||
|
||||
for attr in attrs.into_iter() {
|
||||
let path_ident = if let Some(path_ident) = attr.path.get_ident() {
|
||||
path_ident
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
|
||||
if format!("{}", path_ident) != "doc" {
|
||||
continue;
|
||||
}
|
||||
|
||||
for token_tree in attr.tokens {
|
||||
match token_tree {
|
||||
TokenTree::Literal(literal) => {
|
||||
let mut comment = format!("{}", literal);
|
||||
|
||||
// Strip leading and trailing quotation marks
|
||||
comment.remove(comment.len() - 1);
|
||||
comment.remove(0);
|
||||
|
||||
// Add toml comment indicator
|
||||
comment.insert(0, '#');
|
||||
|
||||
output.push_str(&comment);
|
||||
output.push('\n');
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if output.is_empty() {
|
||||
quote! {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
quote! {
|
||||
Some(#output.to_string())
|
||||
}
|
||||
}
|
||||
}
|
||||
61
crates/udp/Cargo.toml
Normal file
61
crates/udp/Cargo.toml
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
[package]
|
||||
name = "aquatic_udp"
|
||||
description = "High-performance open UDP BitTorrent tracker"
|
||||
keywords = ["udp", "server", "peer-to-peer", "torrent", "bittorrent"]
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[lib]
|
||||
name = "aquatic_udp"
|
||||
|
||||
[[bin]]
|
||||
name = "aquatic_udp"
|
||||
|
||||
[features]
|
||||
default = ["prometheus"]
|
||||
cpu-pinning = ["aquatic_common/hwloc"]
|
||||
prometheus = ["metrics", "metrics-util", "metrics-exporter-prometheus"]
|
||||
io-uring = ["dep:io-uring"]
|
||||
|
||||
[dependencies]
|
||||
aquatic_common.workspace = true
|
||||
aquatic_toml_config.workspace = true
|
||||
aquatic_udp_protocol.workspace = true
|
||||
|
||||
anyhow = "1"
|
||||
blake3 = "1"
|
||||
cfg-if = "1"
|
||||
compact_str = "0.7"
|
||||
constant_time_eq = "0.3"
|
||||
crossbeam-channel = "0.5"
|
||||
getrandom = "0.2"
|
||||
hashbrown = { version = "0.14", default-features = false }
|
||||
hdrhistogram = "7"
|
||||
hex = "0.4"
|
||||
io-uring = { version = "0.6", optional = true }
|
||||
libc = "0.2"
|
||||
log = "0.4"
|
||||
metrics = { version = "0.21", optional = true }
|
||||
metrics-util = { version = "0.15", optional = true }
|
||||
metrics-exporter-prometheus = { version = "0.12", optional = true, default-features = false, features = ["http-listener"] }
|
||||
mimalloc = { version = "0.1", default-features = false }
|
||||
mio = { version = "0.8", features = ["net", "os-poll"] }
|
||||
num-format = "0.4"
|
||||
rand = { version = "0.8", features = ["small_rng"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
signal-hook = { version = "0.3" }
|
||||
slab = "0.4"
|
||||
socket2 = { version = "0.5", features = ["all"] }
|
||||
time = { version = "0.3", features = ["formatting"] }
|
||||
tinytemplate = "1"
|
||||
|
||||
[dev-dependencies]
|
||||
hex = "0.4"
|
||||
tempfile = "3"
|
||||
quickcheck = "1"
|
||||
quickcheck_macros = "1"
|
||||
254
crates/udp/src/common.rs
Normal file
254
crates/udp/src/common.rs
Normal file
|
|
@ -0,0 +1,254 @@
|
|||
use std::collections::BTreeMap;
|
||||
use std::hash::Hash;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crossbeam_channel::{Sender, TrySendError};
|
||||
|
||||
use aquatic_common::access_list::AccessListArcSwap;
|
||||
use aquatic_common::CanonicalSocketAddr;
|
||||
use aquatic_udp_protocol::*;
|
||||
use hdrhistogram::Histogram;
|
||||
|
||||
use crate::config::Config;
|
||||
|
||||
pub const BUFFER_SIZE: usize = 8192;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PendingScrapeRequest {
|
||||
pub slab_key: usize,
|
||||
pub info_hashes: BTreeMap<usize, InfoHash>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PendingScrapeResponse {
|
||||
pub slab_key: usize,
|
||||
pub torrent_stats: BTreeMap<usize, TorrentScrapeStatistics>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ConnectedRequest {
|
||||
Announce(AnnounceRequest),
|
||||
Scrape(PendingScrapeRequest),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ConnectedResponse {
|
||||
AnnounceIpv4(AnnounceResponse<Ipv4Addr>),
|
||||
AnnounceIpv6(AnnounceResponse<Ipv6Addr>),
|
||||
Scrape(PendingScrapeResponse),
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct SocketWorkerIndex(pub usize);
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
|
||||
pub struct SwarmWorkerIndex(pub usize);
|
||||
|
||||
impl SwarmWorkerIndex {
|
||||
pub fn from_info_hash(config: &Config, info_hash: InfoHash) -> Self {
|
||||
Self(info_hash.0[0] as usize % config.swarm_workers)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ConnectedRequestSender {
|
||||
index: SocketWorkerIndex,
|
||||
senders: Vec<Sender<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>>,
|
||||
}
|
||||
|
||||
impl ConnectedRequestSender {
|
||||
pub fn new(
|
||||
index: SocketWorkerIndex,
|
||||
senders: Vec<Sender<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>>,
|
||||
) -> Self {
|
||||
Self { index, senders }
|
||||
}
|
||||
|
||||
pub fn try_send_to(
|
||||
&self,
|
||||
index: SwarmWorkerIndex,
|
||||
request: ConnectedRequest,
|
||||
addr: CanonicalSocketAddr,
|
||||
) {
|
||||
match self.senders[index.0].try_send((self.index, request, addr)) {
|
||||
Ok(()) => {}
|
||||
Err(TrySendError::Full(_)) => {
|
||||
::log::error!("Request channel {} is full, dropping request. Try increasing number of swarm workers or raising config.worker_channel_size.", index.0)
|
||||
}
|
||||
Err(TrySendError::Disconnected(_)) => {
|
||||
panic!("Request channel {} is disconnected", index.0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ConnectedResponseSender {
|
||||
senders: Vec<Sender<(ConnectedResponse, CanonicalSocketAddr)>>,
|
||||
}
|
||||
|
||||
impl ConnectedResponseSender {
|
||||
pub fn new(senders: Vec<Sender<(ConnectedResponse, CanonicalSocketAddr)>>) -> Self {
|
||||
Self { senders }
|
||||
}
|
||||
|
||||
pub fn try_send_to(
|
||||
&self,
|
||||
index: SocketWorkerIndex,
|
||||
response: ConnectedResponse,
|
||||
addr: CanonicalSocketAddr,
|
||||
) {
|
||||
match self.senders[index.0].try_send((response, addr)) {
|
||||
Ok(()) => {}
|
||||
Err(TrySendError::Full(_)) => {
|
||||
::log::error!("Response channel {} is full, dropping response. Try increasing number of socket workers or raising config.worker_channel_size.", index.0)
|
||||
}
|
||||
Err(TrySendError::Disconnected(_)) => {
|
||||
panic!("Response channel {} is disconnected", index.0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
|
||||
pub enum PeerStatus {
|
||||
Seeding,
|
||||
Leeching,
|
||||
Stopped,
|
||||
}
|
||||
|
||||
impl PeerStatus {
|
||||
/// Determine peer status from announce event and number of bytes left.
|
||||
///
|
||||
/// Likely, the last branch will be taken most of the time.
|
||||
#[inline]
|
||||
pub fn from_event_and_bytes_left(event: AnnounceEvent, bytes_left: NumberOfBytes) -> Self {
|
||||
if event == AnnounceEvent::Stopped {
|
||||
Self::Stopped
|
||||
} else if bytes_left.0 == 0 {
|
||||
Self::Seeding
|
||||
} else {
|
||||
Self::Leeching
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub enum StatisticsMessage {
|
||||
Ipv4PeerHistogram(Histogram<u64>),
|
||||
Ipv6PeerHistogram(Histogram<u64>),
|
||||
PeerAdded(PeerId),
|
||||
PeerRemoved(PeerId),
|
||||
}
|
||||
|
||||
pub struct Statistics {
|
||||
pub requests_received: AtomicUsize,
|
||||
pub responses_sent_connect: AtomicUsize,
|
||||
pub responses_sent_announce: AtomicUsize,
|
||||
pub responses_sent_scrape: AtomicUsize,
|
||||
pub responses_sent_error: AtomicUsize,
|
||||
pub bytes_received: AtomicUsize,
|
||||
pub bytes_sent: AtomicUsize,
|
||||
pub torrents: Vec<AtomicUsize>,
|
||||
pub peers: Vec<AtomicUsize>,
|
||||
}
|
||||
|
||||
impl Statistics {
|
||||
pub fn new(num_swarm_workers: usize) -> Self {
|
||||
Self {
|
||||
requests_received: Default::default(),
|
||||
responses_sent_connect: Default::default(),
|
||||
responses_sent_announce: Default::default(),
|
||||
responses_sent_scrape: Default::default(),
|
||||
responses_sent_error: Default::default(),
|
||||
bytes_received: Default::default(),
|
||||
bytes_sent: Default::default(),
|
||||
torrents: Self::create_atomic_usize_vec(num_swarm_workers),
|
||||
peers: Self::create_atomic_usize_vec(num_swarm_workers),
|
||||
}
|
||||
}
|
||||
|
||||
fn create_atomic_usize_vec(len: usize) -> Vec<AtomicUsize> {
|
||||
::std::iter::repeat_with(|| AtomicUsize::default())
|
||||
.take(len)
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct State {
|
||||
pub access_list: Arc<AccessListArcSwap>,
|
||||
pub statistics_ipv4: Arc<Statistics>,
|
||||
pub statistics_ipv6: Arc<Statistics>,
|
||||
}
|
||||
|
||||
impl State {
|
||||
pub fn new(num_swarm_workers: usize) -> Self {
|
||||
Self {
|
||||
access_list: Arc::new(AccessListArcSwap::default()),
|
||||
statistics_ipv4: Arc::new(Statistics::new(num_swarm_workers)),
|
||||
statistics_ipv6: Arc::new(Statistics::new(num_swarm_workers)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::net::Ipv6Addr;

    use crate::config::Config;

    use super::*;

    /// Peer status is derived from announce event and remaining bytes:
    /// Stopped always wins; otherwise zero bytes left means seeding.
    #[test]
    fn test_peer_status_from_event_and_bytes_left() {
        use crate::common::*;

        use PeerStatus::*;

        let status = PeerStatus::from_event_and_bytes_left;

        assert_eq!(Stopped, status(AnnounceEvent::Stopped, NumberOfBytes(0)));
        assert_eq!(Stopped, status(AnnounceEvent::Stopped, NumberOfBytes(1)));

        assert_eq!(Seeding, status(AnnounceEvent::Started, NumberOfBytes(0)));
        assert_eq!(Leeching, status(AnnounceEvent::Started, NumberOfBytes(1)));

        assert_eq!(Seeding, status(AnnounceEvent::Completed, NumberOfBytes(0)));
        assert_eq!(Leeching, status(AnnounceEvent::Completed, NumberOfBytes(1)));

        assert_eq!(Seeding, status(AnnounceEvent::None, NumberOfBytes(0)));
        assert_eq!(Leeching, status(AnnounceEvent::None, NumberOfBytes(1)));
    }

    /// An IPv6 announce response with the maximum number of peers is
    /// assumed to be the longest possible response; it must fit in the
    /// socket worker's fixed-size buffer.
    #[test]
    fn test_buffer_size() {
        use aquatic_udp_protocol::*;

        let config = Config::default();

        let peers = ::std::iter::repeat(ResponsePeer {
            ip_address: Ipv6Addr::new(1, 1, 1, 1, 1, 1, 1, 1),
            port: Port(1),
        })
        .take(config.protocol.max_response_peers)
        .collect();

        let response = Response::AnnounceIpv6(AnnounceResponse {
            transaction_id: TransactionId(1),
            announce_interval: AnnounceInterval(1),
            seeders: NumberOfPeers(1),
            leechers: NumberOfPeers(1),
            peers,
        });

        let mut buf = Vec::new();

        response.write(&mut buf).unwrap();

        println!("Buffer len: {}", buf.len());

        assert!(buf.len() <= BUFFER_SIZE);
    }
}
|
||||
267
crates/udp/src/config.rs
Normal file
267
crates/udp/src/config.rs
Normal file
|
|
@ -0,0 +1,267 @@
|
|||
use std::{net::SocketAddr, path::PathBuf};
|
||||
|
||||
use aquatic_common::{access_list::AccessListConfig, privileges::PrivilegeConfig};
|
||||
use cfg_if::cfg_if;
|
||||
use serde::Deserialize;
|
||||
|
||||
use aquatic_common::cli::LogLevel;
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
|
||||
/// aquatic_udp configuration
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
pub struct Config {
|
||||
/// Number of socket workers. Increase with core count
|
||||
///
|
||||
/// Socket workers receive requests from clients and parse them.
|
||||
/// Responses to connect requests are sent back immediately. Announce and
|
||||
/// scrape requests are passed on to swarm workers, which generate
|
||||
/// responses and send them back to the socket worker, which sends them
|
||||
/// to the client.
|
||||
pub socket_workers: usize,
|
||||
/// Number of swarm workers. One is enough in almost all cases
|
||||
///
|
||||
/// Swarm workers receive parsed announce and scrape requests from socket
|
||||
/// workers, generate responses and send them back to the socket workers.
|
||||
pub swarm_workers: usize,
|
||||
pub log_level: LogLevel,
|
||||
/// Maximum number of items in each channel passing requests/responses
|
||||
/// between workers. A value of zero means that the channels will be of
|
||||
/// unbounded size.
|
||||
pub worker_channel_size: usize,
|
||||
/// How long to block waiting for requests in swarm workers.
|
||||
///
|
||||
/// Higher values means that with zero traffic, the worker will not
|
||||
/// unnecessarily cause the CPU to wake up as often. However, high values
|
||||
/// (something like larger than 1000) combined with very low traffic can
|
||||
/// cause delays in torrent cleaning.
|
||||
pub request_channel_recv_timeout_ms: u64,
|
||||
pub network: NetworkConfig,
|
||||
pub protocol: ProtocolConfig,
|
||||
pub statistics: StatisticsConfig,
|
||||
pub cleaning: CleaningConfig,
|
||||
pub privileges: PrivilegeConfig,
|
||||
pub access_list: AccessListConfig,
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
pub cpu_pinning: aquatic_common::cpu_pinning::asc::CpuPinningConfigAsc,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
socket_workers: 1,
|
||||
swarm_workers: 1,
|
||||
log_level: LogLevel::Error,
|
||||
worker_channel_size: 0,
|
||||
request_channel_recv_timeout_ms: 100,
|
||||
network: NetworkConfig::default(),
|
||||
protocol: ProtocolConfig::default(),
|
||||
statistics: StatisticsConfig::default(),
|
||||
cleaning: CleaningConfig::default(),
|
||||
privileges: PrivilegeConfig::default(),
|
||||
access_list: AccessListConfig::default(),
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
cpu_pinning: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl aquatic_common::cli::Config for Config {
|
||||
fn get_log_level(&self) -> Option<LogLevel> {
|
||||
Some(self.log_level)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
pub struct NetworkConfig {
|
||||
/// Bind to this address
|
||||
pub address: SocketAddr,
|
||||
/// Only allow access over IPv6
|
||||
pub only_ipv6: bool,
|
||||
/// Size of socket recv buffer. Use 0 for OS default.
|
||||
///
|
||||
/// This setting can have a big impact on dropped packages. It might
|
||||
/// require changing system defaults. Some examples of commands to set
|
||||
/// values for different operating systems:
|
||||
///
|
||||
/// macOS:
|
||||
/// $ sudo sysctl net.inet.udp.recvspace=6000000
|
||||
///
|
||||
/// Linux:
|
||||
/// $ sudo sysctl -w net.core.rmem_max=104857600
|
||||
/// $ sudo sysctl -w net.core.rmem_default=104857600
|
||||
pub socket_recv_buffer_size: usize,
|
||||
/// Poll event capacity (mio backend only)
|
||||
pub poll_event_capacity: usize,
|
||||
/// Poll timeout in milliseconds (mio backend only)
|
||||
pub poll_timeout_ms: u64,
|
||||
/// Number of ring entries (io_uring backend only)
|
||||
///
|
||||
/// Will be rounded to next power of two if not already one. Increasing
|
||||
/// this value can help throughput up to a certain point.
|
||||
#[cfg(feature = "io-uring")]
|
||||
pub ring_size: u16,
|
||||
/// Store this many responses at most for retrying (once) on send failure
|
||||
/// (mio backend only)
|
||||
///
|
||||
/// Useful on operating systems that do not provide an udp send buffer,
|
||||
/// such as FreeBSD. Setting the value to zero disables resending
|
||||
/// functionality.
|
||||
pub resend_buffer_max_len: usize,
|
||||
}
|
||||
|
||||
impl NetworkConfig {
|
||||
pub fn ipv4_active(&self) -> bool {
|
||||
self.address.is_ipv4() || !self.only_ipv6
|
||||
}
|
||||
pub fn ipv6_active(&self) -> bool {
|
||||
self.address.is_ipv6()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for NetworkConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
address: SocketAddr::from(([0, 0, 0, 0], 3000)),
|
||||
only_ipv6: false,
|
||||
socket_recv_buffer_size: 4096 * 128,
|
||||
poll_event_capacity: 4096,
|
||||
poll_timeout_ms: 50,
|
||||
#[cfg(feature = "io-uring")]
|
||||
ring_size: 1024,
|
||||
resend_buffer_max_len: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
pub struct ProtocolConfig {
|
||||
/// Maximum number of torrents to allow in scrape request
|
||||
pub max_scrape_torrents: u8,
|
||||
/// Maximum number of peers to return in announce response
|
||||
pub max_response_peers: usize,
|
||||
/// Ask peers to announce this often (seconds)
|
||||
pub peer_announce_interval: i32,
|
||||
}
|
||||
|
||||
impl Default for ProtocolConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_scrape_torrents: 70,
|
||||
max_response_peers: 50,
|
||||
peer_announce_interval: 60 * 15,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
pub struct StatisticsConfig {
|
||||
/// Collect and print/write statistics this often (seconds)
|
||||
pub interval: u64,
|
||||
/// Collect statistics on number of peers per torrent
|
||||
///
|
||||
/// Will increase time taken for torrent cleaning.
|
||||
pub torrent_peer_histograms: bool,
|
||||
/// Collect statistics on peer clients.
|
||||
///
|
||||
/// Also, see `prometheus_peer_id_prefixes`.
|
||||
///
|
||||
/// Expect a certain CPU hit (maybe 5% higher consumption) and a bit higher
|
||||
/// memory use
|
||||
pub peer_clients: bool,
|
||||
/// Print statistics to standard output
|
||||
pub print_to_stdout: bool,
|
||||
/// Save statistics as HTML to a file
|
||||
pub write_html_to_file: bool,
|
||||
/// Path to save HTML file to
|
||||
pub html_file_path: PathBuf,
|
||||
/// Run a prometheus endpoint
|
||||
#[cfg(feature = "prometheus")]
|
||||
pub run_prometheus_endpoint: bool,
|
||||
/// Address to run prometheus endpoint on
|
||||
#[cfg(feature = "prometheus")]
|
||||
pub prometheus_endpoint_address: SocketAddr,
|
||||
/// Serve information on all peer id prefixes on the prometheus endpoint.
|
||||
///
|
||||
/// Requires `peer_clients` to be activated.
|
||||
///
|
||||
/// May consume quite a bit of CPU and RAM, since data on every single peer
|
||||
/// client will be reported continuously on the endpoint
|
||||
#[cfg(feature = "prometheus")]
|
||||
pub prometheus_peer_id_prefixes: bool,
|
||||
}
|
||||
|
||||
impl StatisticsConfig {
|
||||
cfg_if! {
|
||||
if #[cfg(feature = "prometheus")] {
|
||||
pub fn active(&self) -> bool {
|
||||
(self.interval != 0) &
|
||||
(self.print_to_stdout | self.write_html_to_file | self.run_prometheus_endpoint)
|
||||
}
|
||||
} else {
|
||||
pub fn active(&self) -> bool {
|
||||
(self.interval != 0) & (self.print_to_stdout | self.write_html_to_file)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for StatisticsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
interval: 5,
|
||||
torrent_peer_histograms: false,
|
||||
peer_clients: false,
|
||||
print_to_stdout: false,
|
||||
write_html_to_file: false,
|
||||
html_file_path: "tmp/statistics.html".into(),
|
||||
#[cfg(feature = "prometheus")]
|
||||
run_prometheus_endpoint: false,
|
||||
#[cfg(feature = "prometheus")]
|
||||
prometheus_endpoint_address: SocketAddr::from(([0, 0, 0, 0], 9000)),
|
||||
#[cfg(feature = "prometheus")]
|
||||
prometheus_peer_id_prefixes: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
|
||||
#[serde(default, deny_unknown_fields)]
|
||||
pub struct CleaningConfig {
|
||||
/// Clean torrents this often (seconds)
|
||||
pub torrent_cleaning_interval: u64,
|
||||
/// Clean pending scrape responses this often (seconds)
|
||||
///
|
||||
/// In regular operation, there should be no pending scrape responses
|
||||
/// lingering for long enough to have to be cleaned up this way.
|
||||
pub pending_scrape_cleaning_interval: u64,
|
||||
/// Allow clients to use a connection token for this long (seconds)
|
||||
pub max_connection_age: u32,
|
||||
/// Remove peers who have not announced for this long (seconds)
|
||||
pub max_peer_age: u32,
|
||||
/// Remove pending scrape responses that have not been returned from swarm
|
||||
/// workers for this long (seconds)
|
||||
pub max_pending_scrape_age: u32,
|
||||
}
|
||||
|
||||
impl Default for CleaningConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
torrent_cleaning_interval: 60 * 2,
|
||||
pending_scrape_cleaning_interval: 60 * 10,
|
||||
max_connection_age: 60 * 2,
|
||||
max_peer_age: 60 * 20,
|
||||
max_pending_scrape_age: 60,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::Config;

    // Round-trip: serializing the default config to TOML and
    // deserializing it back must produce an equal value.
    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}
|
||||
210
crates/udp/src/lib.rs
Normal file
210
crates/udp/src/lib.rs
Normal file
|
|
@ -0,0 +1,210 @@
|
|||
pub mod common;
|
||||
pub mod config;
|
||||
pub mod workers;
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::thread::Builder;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use crossbeam_channel::{bounded, unbounded};
|
||||
use signal_hook::consts::{SIGTERM, SIGUSR1};
|
||||
use signal_hook::iterator::Signals;
|
||||
|
||||
use aquatic_common::access_list::update_access_list;
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex};
|
||||
use aquatic_common::privileges::PrivilegeDropper;
|
||||
use aquatic_common::{PanicSentinelWatcher, ServerStartInstant};
|
||||
|
||||
use common::{
|
||||
ConnectedRequestSender, ConnectedResponseSender, SocketWorkerIndex, State, SwarmWorkerIndex,
|
||||
};
|
||||
use config::Config;
|
||||
use workers::socket::ConnectionValidator;
|
||||
|
||||
pub const APP_NAME: &str = "aquatic_udp: UDP BitTorrent tracker";
|
||||
pub const APP_VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
|
||||
pub fn run(config: Config) -> ::anyhow::Result<()> {
|
||||
let mut signals = Signals::new([SIGUSR1, SIGTERM])?;
|
||||
|
||||
let state = State::new(config.swarm_workers);
|
||||
let connection_validator = ConnectionValidator::new(&config)?;
|
||||
let (sentinel_watcher, sentinel) = PanicSentinelWatcher::create_with_sentinel();
|
||||
let priv_dropper = PrivilegeDropper::new(config.privileges.clone(), config.socket_workers);
|
||||
|
||||
update_access_list(&config.access_list, &state.access_list)?;
|
||||
|
||||
let mut request_senders = Vec::new();
|
||||
let mut request_receivers = BTreeMap::new();
|
||||
|
||||
let mut response_senders = Vec::new();
|
||||
let mut response_receivers = BTreeMap::new();
|
||||
|
||||
let (statistics_sender, statistics_receiver) = unbounded();
|
||||
|
||||
let server_start_instant = ServerStartInstant::new();
|
||||
|
||||
for i in 0..config.swarm_workers {
|
||||
let (request_sender, request_receiver) = if config.worker_channel_size == 0 {
|
||||
unbounded()
|
||||
} else {
|
||||
bounded(config.worker_channel_size)
|
||||
};
|
||||
|
||||
request_senders.push(request_sender);
|
||||
request_receivers.insert(i, request_receiver);
|
||||
}
|
||||
|
||||
for i in 0..config.socket_workers {
|
||||
let (response_sender, response_receiver) = if config.worker_channel_size == 0 {
|
||||
unbounded()
|
||||
} else {
|
||||
bounded(config.worker_channel_size)
|
||||
};
|
||||
|
||||
response_senders.push(response_sender);
|
||||
response_receivers.insert(i, response_receiver);
|
||||
}
|
||||
|
||||
for i in 0..config.swarm_workers {
|
||||
let sentinel = sentinel.clone();
|
||||
let config = config.clone();
|
||||
let state = state.clone();
|
||||
let request_receiver = request_receivers.remove(&i).unwrap().clone();
|
||||
let response_sender = ConnectedResponseSender::new(response_senders.clone());
|
||||
let statistics_sender = statistics_sender.clone();
|
||||
|
||||
Builder::new()
|
||||
.name(format!("swarm-{:02}", i + 1))
|
||||
.spawn(move || {
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
pin_current_if_configured_to(
|
||||
&config.cpu_pinning,
|
||||
config.socket_workers,
|
||||
config.swarm_workers,
|
||||
WorkerIndex::SwarmWorker(i),
|
||||
);
|
||||
|
||||
workers::swarm::run_swarm_worker(
|
||||
sentinel,
|
||||
config,
|
||||
state,
|
||||
server_start_instant,
|
||||
request_receiver,
|
||||
response_sender,
|
||||
statistics_sender,
|
||||
SwarmWorkerIndex(i),
|
||||
)
|
||||
})
|
||||
.with_context(|| "spawn swarm worker")?;
|
||||
}
|
||||
|
||||
for i in 0..config.socket_workers {
|
||||
let sentinel = sentinel.clone();
|
||||
let state = state.clone();
|
||||
let config = config.clone();
|
||||
let connection_validator = connection_validator.clone();
|
||||
let request_sender =
|
||||
ConnectedRequestSender::new(SocketWorkerIndex(i), request_senders.clone());
|
||||
let response_receiver = response_receivers.remove(&i).unwrap();
|
||||
let priv_dropper = priv_dropper.clone();
|
||||
|
||||
Builder::new()
|
||||
.name(format!("socket-{:02}", i + 1))
|
||||
.spawn(move || {
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
pin_current_if_configured_to(
|
||||
&config.cpu_pinning,
|
||||
config.socket_workers,
|
||||
config.swarm_workers,
|
||||
WorkerIndex::SocketWorker(i),
|
||||
);
|
||||
|
||||
workers::socket::run_socket_worker(
|
||||
sentinel,
|
||||
state,
|
||||
config,
|
||||
connection_validator,
|
||||
server_start_instant,
|
||||
request_sender,
|
||||
response_receiver,
|
||||
priv_dropper,
|
||||
);
|
||||
})
|
||||
.with_context(|| "spawn socket worker")?;
|
||||
}
|
||||
|
||||
if config.statistics.active() {
|
||||
let sentinel = sentinel.clone();
|
||||
let state = state.clone();
|
||||
let config = config.clone();
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint {
|
||||
use metrics_exporter_prometheus::PrometheusBuilder;
|
||||
use metrics_util::MetricKindMask;
|
||||
|
||||
PrometheusBuilder::new()
|
||||
.idle_timeout(
|
||||
MetricKindMask::ALL,
|
||||
Some(Duration::from_secs(config.statistics.interval * 2)),
|
||||
)
|
||||
.with_http_listener(config.statistics.prometheus_endpoint_address)
|
||||
.install()
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Install prometheus endpoint on {}",
|
||||
config.statistics.prometheus_endpoint_address
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
Builder::new()
|
||||
.name("statistics".into())
|
||||
.spawn(move || {
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
pin_current_if_configured_to(
|
||||
&config.cpu_pinning,
|
||||
config.socket_workers,
|
||||
config.swarm_workers,
|
||||
WorkerIndex::Util,
|
||||
);
|
||||
|
||||
workers::statistics::run_statistics_worker(
|
||||
sentinel,
|
||||
config,
|
||||
state,
|
||||
statistics_receiver,
|
||||
);
|
||||
})
|
||||
.with_context(|| "spawn statistics worker")?;
|
||||
}
|
||||
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
pin_current_if_configured_to(
|
||||
&config.cpu_pinning,
|
||||
config.socket_workers,
|
||||
config.swarm_workers,
|
||||
WorkerIndex::Util,
|
||||
);
|
||||
|
||||
for signal in &mut signals {
|
||||
match signal {
|
||||
SIGUSR1 => {
|
||||
let _ = update_access_list(&config.access_list, &state.access_list);
|
||||
}
|
||||
SIGTERM => {
|
||||
if sentinel_watcher.panic_was_triggered() {
|
||||
return Err(anyhow::anyhow!("worker thread panicked"));
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
11
crates/udp/src/main.rs
Normal file
11
crates/udp/src/main.rs
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
#[global_allocator]
|
||||
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn main() {
|
||||
aquatic_common::cli::run_app_with_cli_and_config::<aquatic_udp::config::Config>(
|
||||
aquatic_udp::APP_NAME,
|
||||
aquatic_udp::APP_VERSION,
|
||||
aquatic_udp::run,
|
||||
None,
|
||||
)
|
||||
}
|
||||
3
crates/udp/src/workers/mod.rs
Normal file
3
crates/udp/src/workers/mod.rs
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
pub mod socket;
|
||||
pub mod statistics;
|
||||
pub mod swarm;
|
||||
432
crates/udp/src/workers/socket/mio.rs
Normal file
432
crates/udp/src/workers/socket/mio.rs
Normal file
|
|
@ -0,0 +1,432 @@
|
|||
use std::io::{Cursor, ErrorKind};
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use aquatic_common::access_list::AccessListCache;
|
||||
use aquatic_common::ServerStartInstant;
|
||||
use crossbeam_channel::Receiver;
|
||||
use mio::net::UdpSocket;
|
||||
use mio::{Events, Interest, Poll, Token};
|
||||
|
||||
use aquatic_common::{
|
||||
access_list::create_access_list_cache, privileges::PrivilegeDropper, CanonicalSocketAddr,
|
||||
PanicSentinel, ValidUntil,
|
||||
};
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
use super::storage::PendingScrapeResponseSlab;
|
||||
use super::validator::ConnectionValidator;
|
||||
use super::{create_socket, EXTRA_PACKET_SIZE_IPV4, EXTRA_PACKET_SIZE_IPV6};
|
||||
|
||||
pub struct SocketWorker {
|
||||
config: Config,
|
||||
shared_state: State,
|
||||
request_sender: ConnectedRequestSender,
|
||||
response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
|
||||
access_list_cache: AccessListCache,
|
||||
validator: ConnectionValidator,
|
||||
server_start_instant: ServerStartInstant,
|
||||
pending_scrape_responses: PendingScrapeResponseSlab,
|
||||
socket: UdpSocket,
|
||||
buffer: [u8; BUFFER_SIZE],
|
||||
}
|
||||
|
||||
impl SocketWorker {
|
||||
pub fn run(
|
||||
_sentinel: PanicSentinel,
|
||||
shared_state: State,
|
||||
config: Config,
|
||||
validator: ConnectionValidator,
|
||||
server_start_instant: ServerStartInstant,
|
||||
request_sender: ConnectedRequestSender,
|
||||
response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
|
||||
priv_dropper: PrivilegeDropper,
|
||||
) {
|
||||
let socket =
|
||||
UdpSocket::from_std(create_socket(&config, priv_dropper).expect("create socket"));
|
||||
let access_list_cache = create_access_list_cache(&shared_state.access_list);
|
||||
|
||||
let mut worker = Self {
|
||||
config,
|
||||
shared_state,
|
||||
validator,
|
||||
server_start_instant,
|
||||
request_sender,
|
||||
response_receiver,
|
||||
access_list_cache,
|
||||
pending_scrape_responses: Default::default(),
|
||||
socket,
|
||||
buffer: [0; BUFFER_SIZE],
|
||||
};
|
||||
|
||||
worker.run_inner();
|
||||
}
|
||||
|
||||
pub fn run_inner(&mut self) {
|
||||
let mut local_responses = Vec::new();
|
||||
let mut opt_resend_buffer =
|
||||
(self.config.network.resend_buffer_max_len > 0).then_some(Vec::new());
|
||||
|
||||
let mut events = Events::with_capacity(self.config.network.poll_event_capacity);
|
||||
let mut poll = Poll::new().expect("create poll");
|
||||
|
||||
poll.registry()
|
||||
.register(&mut self.socket, Token(0), Interest::READABLE)
|
||||
.expect("register poll");
|
||||
|
||||
let poll_timeout = Duration::from_millis(self.config.network.poll_timeout_ms);
|
||||
|
||||
let pending_scrape_cleaning_duration =
|
||||
Duration::from_secs(self.config.cleaning.pending_scrape_cleaning_interval);
|
||||
|
||||
let mut pending_scrape_valid_until = ValidUntil::new(
|
||||
self.server_start_instant,
|
||||
self.config.cleaning.max_pending_scrape_age,
|
||||
);
|
||||
let mut last_pending_scrape_cleaning = Instant::now();
|
||||
|
||||
let mut iter_counter = 0usize;
|
||||
|
||||
loop {
|
||||
poll.poll(&mut events, Some(poll_timeout))
|
||||
.expect("failed polling");
|
||||
|
||||
for event in events.iter() {
|
||||
if event.is_readable() {
|
||||
self.read_and_handle_requests(&mut local_responses, pending_scrape_valid_until);
|
||||
}
|
||||
}
|
||||
|
||||
// If resend buffer is enabled, send any responses in it
|
||||
if let Some(resend_buffer) = opt_resend_buffer.as_mut() {
|
||||
for (response, addr) in resend_buffer.drain(..) {
|
||||
Self::send_response(
|
||||
&self.config,
|
||||
&self.shared_state,
|
||||
&mut self.socket,
|
||||
&mut self.buffer,
|
||||
&mut None,
|
||||
response,
|
||||
addr,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Send any connect and error responses generated by this socket worker
|
||||
for (response, addr) in local_responses.drain(..) {
|
||||
Self::send_response(
|
||||
&self.config,
|
||||
&self.shared_state,
|
||||
&mut self.socket,
|
||||
&mut self.buffer,
|
||||
&mut opt_resend_buffer,
|
||||
response,
|
||||
addr,
|
||||
);
|
||||
}
|
||||
|
||||
// Check channel for any responses generated by swarm workers
|
||||
for (response, addr) in self.response_receiver.try_iter() {
|
||||
let opt_response = match response {
|
||||
ConnectedResponse::Scrape(r) => self
|
||||
.pending_scrape_responses
|
||||
.add_and_get_finished(r)
|
||||
.map(Response::Scrape),
|
||||
ConnectedResponse::AnnounceIpv4(r) => Some(Response::AnnounceIpv4(r)),
|
||||
ConnectedResponse::AnnounceIpv6(r) => Some(Response::AnnounceIpv6(r)),
|
||||
};
|
||||
|
||||
if let Some(response) = opt_response {
|
||||
Self::send_response(
|
||||
&self.config,
|
||||
&self.shared_state,
|
||||
&mut self.socket,
|
||||
&mut self.buffer,
|
||||
&mut opt_resend_buffer,
|
||||
response,
|
||||
addr,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Run periodic ValidUntil updates and state cleaning
|
||||
if iter_counter % 256 == 0 {
|
||||
let seconds_since_start = self.server_start_instant.seconds_elapsed();
|
||||
|
||||
pending_scrape_valid_until = ValidUntil::new_with_now(
|
||||
seconds_since_start,
|
||||
self.config.cleaning.max_pending_scrape_age,
|
||||
);
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
if now > last_pending_scrape_cleaning + pending_scrape_cleaning_duration {
|
||||
self.pending_scrape_responses.clean(seconds_since_start);
|
||||
|
||||
last_pending_scrape_cleaning = now;
|
||||
}
|
||||
}
|
||||
|
||||
iter_counter = iter_counter.wrapping_add(1);
|
||||
}
|
||||
}
|
||||
|
||||
fn read_and_handle_requests(
|
||||
&mut self,
|
||||
local_responses: &mut Vec<(Response, CanonicalSocketAddr)>,
|
||||
pending_scrape_valid_until: ValidUntil,
|
||||
) {
|
||||
let mut requests_received_ipv4: usize = 0;
|
||||
let mut requests_received_ipv6: usize = 0;
|
||||
let mut bytes_received_ipv4: usize = 0;
|
||||
let mut bytes_received_ipv6 = 0;
|
||||
|
||||
loop {
|
||||
match self.socket.recv_from(&mut self.buffer[..]) {
|
||||
Ok((bytes_read, src)) => {
|
||||
if src.port() == 0 {
|
||||
::log::info!("Ignored request from {} because source port is zero", src);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
let src = CanonicalSocketAddr::new(src);
|
||||
|
||||
let request_parsable = match Request::from_bytes(
|
||||
&self.buffer[..bytes_read],
|
||||
self.config.protocol.max_scrape_torrents,
|
||||
) {
|
||||
Ok(request) => {
|
||||
self.handle_request(
|
||||
local_responses,
|
||||
pending_scrape_valid_until,
|
||||
request,
|
||||
src,
|
||||
);
|
||||
|
||||
true
|
||||
}
|
||||
Err(err) => {
|
||||
::log::debug!("Request::from_bytes error: {:?}", err);
|
||||
|
||||
if let RequestParseError::Sendable {
|
||||
connection_id,
|
||||
transaction_id,
|
||||
err,
|
||||
} = err
|
||||
{
|
||||
if self.validator.connection_id_valid(src, connection_id) {
|
||||
let response = ErrorResponse {
|
||||
transaction_id,
|
||||
message: err.right_or("Parse error").into(),
|
||||
};
|
||||
|
||||
local_responses.push((response.into(), src));
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
// Update statistics for converted address
|
||||
if src.is_ipv4() {
|
||||
if request_parsable {
|
||||
requests_received_ipv4 += 1;
|
||||
}
|
||||
bytes_received_ipv4 += bytes_read + EXTRA_PACKET_SIZE_IPV4;
|
||||
} else {
|
||||
if request_parsable {
|
||||
requests_received_ipv6 += 1;
|
||||
}
|
||||
bytes_received_ipv6 += bytes_read + EXTRA_PACKET_SIZE_IPV6;
|
||||
}
|
||||
}
|
||||
Err(err) if err.kind() == ErrorKind::WouldBlock => {
|
||||
break;
|
||||
}
|
||||
Err(err) => {
|
||||
::log::warn!("recv_from error: {:#}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if self.config.statistics.active() {
|
||||
self.shared_state
|
||||
.statistics_ipv4
|
||||
.requests_received
|
||||
.fetch_add(requests_received_ipv4, Ordering::Relaxed);
|
||||
self.shared_state
|
||||
.statistics_ipv6
|
||||
.requests_received
|
||||
.fetch_add(requests_received_ipv6, Ordering::Relaxed);
|
||||
self.shared_state
|
||||
.statistics_ipv4
|
||||
.bytes_received
|
||||
.fetch_add(bytes_received_ipv4, Ordering::Relaxed);
|
||||
self.shared_state
|
||||
.statistics_ipv6
|
||||
.bytes_received
|
||||
.fetch_add(bytes_received_ipv6, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_request(
|
||||
&mut self,
|
||||
local_responses: &mut Vec<(Response, CanonicalSocketAddr)>,
|
||||
pending_scrape_valid_until: ValidUntil,
|
||||
request: Request,
|
||||
src: CanonicalSocketAddr,
|
||||
) {
|
||||
let access_list_mode = self.config.access_list.mode;
|
||||
|
||||
match request {
|
||||
Request::Connect(request) => {
|
||||
let connection_id = self.validator.create_connection_id(src);
|
||||
|
||||
let response = Response::Connect(ConnectResponse {
|
||||
connection_id,
|
||||
transaction_id: request.transaction_id,
|
||||
});
|
||||
|
||||
local_responses.push((response, src))
|
||||
}
|
||||
Request::Announce(request) => {
|
||||
if self
|
||||
.validator
|
||||
.connection_id_valid(src, request.connection_id)
|
||||
{
|
||||
if self
|
||||
.access_list_cache
|
||||
.load()
|
||||
.allows(access_list_mode, &request.info_hash.0)
|
||||
{
|
||||
let worker_index =
|
||||
SwarmWorkerIndex::from_info_hash(&self.config, request.info_hash);
|
||||
|
||||
self.request_sender.try_send_to(
|
||||
worker_index,
|
||||
ConnectedRequest::Announce(request),
|
||||
src,
|
||||
);
|
||||
} else {
|
||||
let response = Response::Error(ErrorResponse {
|
||||
transaction_id: request.transaction_id,
|
||||
message: "Info hash not allowed".into(),
|
||||
});
|
||||
|
||||
local_responses.push((response, src))
|
||||
}
|
||||
}
|
||||
}
|
||||
Request::Scrape(request) => {
|
||||
if self
|
||||
.validator
|
||||
.connection_id_valid(src, request.connection_id)
|
||||
{
|
||||
let split_requests = self.pending_scrape_responses.prepare_split_requests(
|
||||
&self.config,
|
||||
request,
|
||||
pending_scrape_valid_until,
|
||||
);
|
||||
|
||||
for (swarm_worker_index, request) in split_requests {
|
||||
self.request_sender.try_send_to(
|
||||
swarm_worker_index,
|
||||
ConnectedRequest::Scrape(request),
|
||||
src,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn send_response(
|
||||
config: &Config,
|
||||
shared_state: &State,
|
||||
socket: &mut UdpSocket,
|
||||
buffer: &mut [u8],
|
||||
opt_resend_buffer: &mut Option<Vec<(Response, CanonicalSocketAddr)>>,
|
||||
response: Response,
|
||||
canonical_addr: CanonicalSocketAddr,
|
||||
) {
|
||||
let mut cursor = Cursor::new(buffer);
|
||||
|
||||
if let Err(err) = response.write(&mut cursor) {
|
||||
::log::error!("Converting response to bytes failed: {:#}", err);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
let bytes_written = cursor.position() as usize;
|
||||
|
||||
let addr = if config.network.address.is_ipv4() {
|
||||
canonical_addr
|
||||
.get_ipv4()
|
||||
.expect("found peer ipv6 address while running bound to ipv4 address")
|
||||
} else {
|
||||
canonical_addr.get_ipv6_mapped()
|
||||
};
|
||||
|
||||
match socket.send_to(&cursor.get_ref()[..bytes_written], addr) {
|
||||
Ok(amt) if config.statistics.active() => {
|
||||
let stats = if canonical_addr.is_ipv4() {
|
||||
let stats = &shared_state.statistics_ipv4;
|
||||
|
||||
stats
|
||||
.bytes_sent
|
||||
.fetch_add(amt + EXTRA_PACKET_SIZE_IPV4, Ordering::Relaxed);
|
||||
|
||||
stats
|
||||
} else {
|
||||
let stats = &shared_state.statistics_ipv6;
|
||||
|
||||
stats
|
||||
.bytes_sent
|
||||
.fetch_add(amt + EXTRA_PACKET_SIZE_IPV6, Ordering::Relaxed);
|
||||
|
||||
stats
|
||||
};
|
||||
|
||||
match response {
|
||||
Response::Connect(_) => {
|
||||
stats.responses_sent_connect.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => {
|
||||
stats
|
||||
.responses_sent_announce
|
||||
.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
Response::Scrape(_) => {
|
||||
stats.responses_sent_scrape.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
Response::Error(_) => {
|
||||
stats.responses_sent_error.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(_) => (),
|
||||
Err(err) => match opt_resend_buffer.as_mut() {
|
||||
Some(resend_buffer)
|
||||
if (err.raw_os_error() == Some(libc::ENOBUFS))
|
||||
|| (err.kind() == ErrorKind::WouldBlock) =>
|
||||
{
|
||||
if resend_buffer.len() < config.network.resend_buffer_max_len {
|
||||
::log::info!("Adding response to resend queue, since sending it to {} failed with: {:#}", addr, err);
|
||||
|
||||
resend_buffer.push((response, canonical_addr));
|
||||
} else {
|
||||
::log::warn!("Response resend buffer full, dropping response");
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
::log::warn!("Sending response to {} failed: {:#}", addr, err);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
128
crates/udp/src/workers/socket/mod.rs
Normal file
128
crates/udp/src/workers/socket/mod.rs
Normal file
|
|
@ -0,0 +1,128 @@
|
|||
mod mio;
|
||||
mod storage;
|
||||
#[cfg(feature = "io-uring")]
|
||||
mod uring;
|
||||
mod validator;
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_common::{
|
||||
privileges::PrivilegeDropper, CanonicalSocketAddr, PanicSentinel, ServerStartInstant,
|
||||
};
|
||||
use crossbeam_channel::Receiver;
|
||||
use socket2::{Domain, Protocol, Socket, Type};
|
||||
|
||||
use crate::{
|
||||
common::{ConnectedRequestSender, ConnectedResponse, State},
|
||||
config::Config,
|
||||
};
|
||||
|
||||
pub use self::validator::ConnectionValidator;
|
||||
|
||||
/// Bytes of data transmitted when sending an IPv4 UDP packet, in addition to payload size
///
/// Consists of (all quantities are bytes, not bits):
/// - 8 byte ethernet preamble
/// - 14 + 4 byte MAC header and checksum
/// - 20 byte IPv4 header
/// - 8 byte UDP header
const EXTRA_PACKET_SIZE_IPV4: usize = 8 + 18 + 20 + 8;
|
||||
|
||||
/// Bytes of data transmitted when sending an IPv6 UDP packet, in addition to payload size
///
/// Consists of (all quantities are bytes, not bits):
/// - 8 byte ethernet preamble
/// - 14 + 4 byte MAC header and checksum
/// - 40 byte IPv6 header
/// - 8 byte UDP header
const EXTRA_PACKET_SIZE_IPV6: usize = 8 + 18 + 40 + 8;
|
||||
|
||||
/// Run a socket worker: receive requests from peers over UDP, forward them to
/// swarm workers via `request_sender`, and send back responses arriving on
/// `response_receiver`.
///
/// When compiled with the "io-uring" feature, the io_uring-based
/// implementation is preferred; if the running kernel lacks the required
/// io_uring support, a warning is logged and the mio-based implementation is
/// used instead. Without the feature, the mio implementation is always used.
pub fn run_socket_worker(
    sentinel: PanicSentinel,
    shared_state: State,
    config: Config,
    validator: ConnectionValidator,
    server_start_instant: ServerStartInstant,
    request_sender: ConnectedRequestSender,
    response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
    priv_dropper: PrivilegeDropper,
) {
    // Note: the Ok arm moves all arguments into the uring worker and then
    // returns, so the mio call below is only reached on the Err path (or
    // when the feature is disabled and this whole match is compiled out).
    #[cfg(feature = "io-uring")]
    match self::uring::supported_on_current_kernel() {
        Ok(()) => {
            self::uring::SocketWorker::run(
                sentinel,
                shared_state,
                config,
                validator,
                server_start_instant,
                request_sender,
                response_receiver,
                priv_dropper,
            );

            return;
        }
        Err(err) => {
            ::log::warn!(
                "Falling back to mio because of lacking kernel io_uring support: {:#}",
                err
            );
        }
    }

    self::mio::SocketWorker::run(
        sentinel,
        shared_state,
        config,
        validator,
        server_start_instant,
        request_sender,
        response_receiver,
        priv_dropper,
    );
}
|
||||
|
||||
fn create_socket(
|
||||
config: &Config,
|
||||
priv_dropper: PrivilegeDropper,
|
||||
) -> anyhow::Result<::std::net::UdpSocket> {
|
||||
let socket = if config.network.address.is_ipv4() {
|
||||
Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))?
|
||||
} else {
|
||||
Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))?
|
||||
};
|
||||
|
||||
if config.network.only_ipv6 {
|
||||
socket
|
||||
.set_only_v6(true)
|
||||
.with_context(|| "socket: set only ipv6")?;
|
||||
}
|
||||
|
||||
socket
|
||||
.set_reuse_port(true)
|
||||
.with_context(|| "socket: set reuse port")?;
|
||||
|
||||
socket
|
||||
.set_nonblocking(true)
|
||||
.with_context(|| "socket: set nonblocking")?;
|
||||
|
||||
let recv_buffer_size = config.network.socket_recv_buffer_size;
|
||||
|
||||
if recv_buffer_size != 0 {
|
||||
if let Err(err) = socket.set_recv_buffer_size(recv_buffer_size) {
|
||||
::log::error!(
|
||||
"socket: failed setting recv buffer to {}: {:?}",
|
||||
recv_buffer_size,
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
socket
|
||||
.bind(&config.network.address.into())
|
||||
.with_context(|| format!("socket: bind to {}", config.network.address))?;
|
||||
|
||||
priv_dropper.after_socket_creation()?;
|
||||
|
||||
Ok(socket.into())
|
||||
}
|
||||
219
crates/udp/src/workers/socket/storage.rs
Normal file
219
crates/udp/src/workers/socket/storage.rs
Normal file
|
|
@ -0,0 +1,219 @@
|
|||
use std::collections::BTreeMap;
|
||||
|
||||
use hashbrown::HashMap;
|
||||
use slab::Slab;
|
||||
|
||||
use aquatic_common::{SecondsSinceServerStart, ValidUntil};
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
/// Bookkeeping for one scrape request that was split across swarm workers.
#[derive(Debug)]
pub struct PendingScrapeResponseSlabEntry {
    num_pending: usize, // partial responses not yet received
    valid_until: ValidUntil, // expiry deadline, enforced by `clean`
    torrent_stats: BTreeMap<usize, TorrentScrapeStatistics>, // keyed by info hash index in original request
    transaction_id: TransactionId, // transaction id of the originating request
}
|
||||
|
||||
/// Slab of scrape requests currently awaiting partial responses from swarm
/// workers, keyed by slab index.
#[derive(Default)]
pub struct PendingScrapeResponseSlab(Slab<PendingScrapeResponseSlabEntry>);
|
||||
|
||||
impl PendingScrapeResponseSlab {
    /// Split a scrape request into per-swarm-worker partial requests and
    /// register a slab entry tracking how many partial responses are pending.
    ///
    /// Info hashes are grouped by the swarm worker responsible for them and
    /// keyed by their index in the original request, so that the combined
    /// response can be reassembled in request order. If the request contains
    /// no info hashes, a warning is logged and no slab entry is created.
    pub fn prepare_split_requests(
        &mut self,
        config: &Config,
        request: ScrapeRequest,
        valid_until: ValidUntil,
    ) -> impl IntoIterator<Item = (SwarmWorkerIndex, PendingScrapeRequest)> {
        // At most one partial request per swarm worker, and never more than
        // there are info hashes
        let capacity = config.swarm_workers.min(request.info_hashes.len());
        let mut split_requests: HashMap<SwarmWorkerIndex, PendingScrapeRequest> =
            HashMap::with_capacity(capacity);

        if request.info_hashes.is_empty() {
            ::log::warn!(
                "Attempted to prepare PendingScrapeResponseSlab entry with zero info hashes"
            );

            return split_requests;
        }

        // Reserve the slab key up front so every partial request can carry it
        let vacant_entry = self.0.vacant_entry();
        let slab_key = vacant_entry.key();

        for (i, info_hash) in request.info_hashes.into_iter().enumerate() {
            let split_request = split_requests
                .entry(SwarmWorkerIndex::from_info_hash(&config, info_hash))
                .or_insert_with(|| PendingScrapeRequest {
                    slab_key,
                    info_hashes: BTreeMap::new(),
                });

            split_request.info_hashes.insert(i, info_hash);
        }

        vacant_entry.insert(PendingScrapeResponseSlabEntry {
            num_pending: split_requests.len(),
            valid_until,
            torrent_stats: Default::default(),
            transaction_id: request.transaction_id,
        });

        split_requests
    }

    /// Record a partial response from a swarm worker.
    ///
    /// Returns the combined `ScrapeResponse` (and removes the slab entry)
    /// once all pending partial responses for the entry have arrived,
    /// otherwise `None`. If no entry exists for the response's slab key
    /// (e.g., because it was already cleaned out), a warning is logged and
    /// `None` is returned.
    pub fn add_and_get_finished(
        &mut self,
        response: PendingScrapeResponse,
    ) -> Option<ScrapeResponse> {
        let finished = if let Some(entry) = self.0.get_mut(response.slab_key) {
            entry.num_pending -= 1;

            entry
                .torrent_stats
                .extend(response.torrent_stats.into_iter());

            entry.num_pending == 0
        } else {
            ::log::warn!(
                "PendingScrapeResponseSlab.add didn't find entry for key {:?}",
                response.slab_key
            );

            false
        };

        if finished {
            let entry = self.0.remove(response.slab_key);

            // BTreeMap::into_values yields stats ordered by original info
            // hash index, restoring the request's ordering
            Some(ScrapeResponse {
                transaction_id: entry.transaction_id,
                torrent_stats: entry.torrent_stats.into_values().collect(),
            })
        } else {
            None
        }
    }

    /// Remove entries whose validity deadline has passed (logging each one,
    /// since an expired entry means a response was never fully assembled)
    /// and release excess slab capacity.
    pub fn clean(&mut self, now: SecondsSinceServerStart) {
        self.0.retain(|k, v| {
            if v.valid_until.valid(now) {
                true
            } else {
                ::log::warn!(
                    "Unconsumed PendingScrapeResponseSlab entry. {:?}: {:?}",
                    k,
                    v
                );

                false
            }
        });

        self.0.shrink_to_fit();
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use aquatic_common::ServerStartInstant;
    use quickcheck::TestResult;
    use quickcheck_macros::quickcheck;

    use super::*;

    // Property test: for arbitrary scrape requests split over an arbitrary
    // (nonzero) number of swarm workers, feeding exactly one fabricated
    // partial response per split request back into the slab must produce
    // exactly one combined response per original request and leave the slab
    // empty afterwards.
    #[quickcheck]
    fn test_pending_scrape_response_slab(
        request_data: Vec<(i32, i64, u8)>,
        swarm_workers: u8,
    ) -> TestResult {
        if swarm_workers == 0 {
            return TestResult::discard();
        }

        let mut config = Config::default();

        config.swarm_workers = swarm_workers as usize;

        let valid_until = ValidUntil::new(ServerStartInstant::new(), 1);

        let mut map = PendingScrapeResponseSlab::default();

        let mut requests = Vec::new();

        // Build one ScrapeRequest per tuple: t = transaction id,
        // c = connection id, b = number of info hashes (must be nonzero)
        for (t, c, b) in request_data {
            if b == 0 {
                return TestResult::discard();
            }

            let mut info_hashes = Vec::new();

            for i in 0..b {
                let info_hash = InfoHash([i; 20]);

                info_hashes.push(info_hash);
            }

            let request = ScrapeRequest {
                transaction_id: TransactionId(t),
                connection_id: ConnectionId(c),
                info_hashes,
            };

            requests.push(request);
        }

        let mut all_split_requests = Vec::new();

        for request in requests.iter() {
            let split_requests =
                map.prepare_split_requests(&config, request.to_owned(), valid_until);

            all_split_requests.push(
                split_requests
                    .into_iter()
                    .collect::<Vec<(SwarmWorkerIndex, PendingScrapeRequest)>>(),
            );
        }

        // One slab entry per request must have been created
        assert_eq!(map.0.len(), requests.len());

        let mut responses = Vec::new();

        for split_requests in all_split_requests {
            for (worker_index, split_request) in split_requests {
                assert!(worker_index.0 < swarm_workers as usize);

                // Fabricate deterministic per-torrent stats derived from the
                // info hash
                let torrent_stats = split_request
                    .info_hashes
                    .into_iter()
                    .map(|(i, info_hash)| {
                        (
                            i,
                            TorrentScrapeStatistics {
                                seeders: NumberOfPeers((info_hash.0[0]) as i32),
                                leechers: NumberOfPeers(0),
                                completed: NumberOfDownloads(0),
                            },
                        )
                    })
                    .collect();

                let response = PendingScrapeResponse {
                    slab_key: split_request.slab_key,
                    torrent_stats,
                };

                if let Some(response) = map.add_and_get_finished(response) {
                    responses.push(response);
                }
            }
        }

        assert!(map.0.is_empty());
        assert_eq!(responses.len(), requests.len());

        TestResult::from_bool(true)
    }
}
|
||||
947
crates/udp/src/workers/socket/uring/buf_ring.rs
Normal file
947
crates/udp/src/workers/socket/uring/buf_ring.rs
Normal file
|
|
@ -0,0 +1,947 @@
|
|||
// Copyright (c) 2021 Carl Lerche
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any
|
||||
// person obtaining a copy of this software and associated
|
||||
// documentation files (the "Software"), to deal in the
|
||||
// Software without restriction, including without
|
||||
// limitation the rights to use, copy, modify, merge,
|
||||
// publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software
|
||||
// is furnished to do so, subject to the following
|
||||
// conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice
|
||||
// shall be included in all copies or substantial portions
|
||||
// of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
// IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
// Copied (with slight modifications) from
|
||||
// - https://github.com/FrankReh/tokio-uring/tree/9387c92c98138451f7d760432a04b0b95a406f22/src/buf/bufring
|
||||
// - https://github.com/FrankReh/tokio-uring/blob/9387c92c98138451f7d760432a04b0b95a406f22/src/buf/bufgroup/mod.rs
|
||||
|
||||
//! Module for the io_uring device's buf_ring feature.
|
||||
|
||||
// Developer's note about io_uring return codes when a buf_ring is used:
|
||||
//
|
||||
// While a buf_ring pool is exhausted, new calls to read that are, or are not, ready to read will
|
||||
// fail with the 105 error, "no buffers", while existing calls that were waiting to become ready to
|
||||
// read will not fail. Only when the data becomes ready to read will they fail, if the buffer ring
|
||||
// is still empty at that time. This makes sense when thinking about it from how the kernel
|
||||
// implements the start of a read command; it can be confusing when first working with these
|
||||
// commands from the userland perspective.
|
||||
|
||||
// While the file! calls yield the clippy false positive.
|
||||
#![allow(clippy::print_literal)]
|
||||
|
||||
use io_uring::types;
|
||||
use std::cell::Cell;
|
||||
use std::io;
|
||||
use std::rc::Rc;
|
||||
use std::sync::atomic::{self, AtomicU16};
|
||||
|
||||
use super::CurrentRing;
|
||||
|
||||
/// The buffer group ID.
|
||||
///
|
||||
/// The creator of a buffer group is responsible for picking a buffer group id
|
||||
/// that does not conflict with other buffer group ids also being registered with the uring
|
||||
/// interface.
|
||||
pub(crate) type Bgid = u16;
|
||||
|
||||
// Future: Maybe create a bgid module with a trivial implementation of a type that tracks the next
|
||||
// bgid to use. The crate's driver could do that perhaps, but there could be a benefit to tracking
|
||||
// them across multiple thread's drivers. So there is flexibility in not building it into the
|
||||
// driver.
|
||||
|
||||
/// The buffer ID. Buffer ids are assigned and used by the crate and probably are not visible
|
||||
/// to the crate user.
|
||||
pub(crate) type Bid = u16;
|
||||
|
||||
/// This tracks a buffer that has been filled in by the kernel, having gotten the memory
|
||||
/// from a buffer ring, and returned to userland via a cqe entry.
|
||||
pub struct BufX {
    bgroup: BufRing, // keeps the owning ring alive while this buffer is outstanding
    bid: Bid,        // kernel-chosen buffer id within the ring
    len: usize,      // bytes the kernel reported as written (<= buffer capacity)
}
|
||||
|
||||
impl BufX {
    // Wrap a kernel-filled buffer `bid` of `bgroup`, with `len` bytes written.
    //
    // # Safety
    //
    // The bid must be the buffer id supplied by the kernel as having been chosen and written to.
    // The length of the buffer must represent the length written to by the kernel.
    pub(crate) unsafe fn new(bgroup: BufRing, bid: Bid, len: usize) -> Self {
        // len will already have been checked against the buf_capacity
        // so it is guaranteed that len <= bgroup.buf_capacity.

        Self { bgroup, bid, len }
    }

    /// Return the number of bytes initialized.
    ///
    /// This value initially came from the kernel, as reported in the cqe. This value may have been
    /// modified with a call to the IoBufMut::set_init method.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Return true if this represents an empty buffer. The length reported by the kernel was 0.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Return the capacity of this buffer.
    #[inline]
    pub fn cap(&self) -> usize {
        self.bgroup.buf_capacity(self.bid)
    }

    /// Return a byte slice reference.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        let p = self.bgroup.stable_ptr(self.bid);
        // Safety: the pointer returned by stable_ptr is valid for the lifetime of self,
        // and self's len is set when the kernel reports the amount of data that was
        // written into the buffer.
        unsafe { std::slice::from_raw_parts(p, self.len) }
    }

    /// Return a mutable byte slice reference.
    #[inline]
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        let p = self.bgroup.stable_mut_ptr(self.bid);
        // Safety: the pointer returned by stable_mut_ptr is valid for the lifetime of self,
        // and self's len is set when the kernel reports the amount of data that was
        // written into the buffer. In addition, we hold a &mut reference to self.
        unsafe { std::slice::from_raw_parts_mut(p, self.len) }
    }

    // Future: provide access to the uninit space between len and cap if the buffer is being
    // repurposed before being dropped. The set_init below does that too.
}
|
||||
|
||||
impl Drop for BufX {
    /// Releases the buffer id back to its ring so the kernel can reuse it.
    fn drop(&mut self) {
        // Add the buffer back to the bgroup, for the kernel to reuse.
        // Safety: this function may only be called by the buffer's drop function.
        unsafe { self.bgroup.dropping_bid(self.bid) };
    }
}
|
||||
|
||||
/*
|
||||
unsafe impl crate::buf::IoBuf for BufX {
|
||||
fn stable_ptr(&self) -> *const u8 {
|
||||
self.bgroup.stable_ptr(self.bid)
|
||||
}
|
||||
|
||||
fn bytes_init(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
|
||||
fn bytes_total(&self) -> usize {
|
||||
self.cap()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl crate::buf::IoBufMut for BufX {
|
||||
fn stable_mut_ptr(&mut self) -> *mut u8 {
|
||||
self.bgroup.stable_mut_ptr(self.bid)
|
||||
}
|
||||
|
||||
unsafe fn set_init(&mut self, init_len: usize) {
|
||||
if self.len < init_len {
|
||||
let cap = self.bgroup.buf_capacity(self.bid);
|
||||
assert!(init_len <= cap);
|
||||
self.len = init_len;
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
impl From<BufX> for Vec<u8> {
    /// Copy the kernel-filled bytes into an owned `Vec`. Dropping `item`
    /// afterwards returns the ring buffer to the kernel.
    fn from(item: BufX) -> Self {
        item.as_slice().to_vec()
    }
}
|
||||
|
||||
/// A `BufRing` represents the ring and the buffers used with the kernel's io_uring buf_ring
|
||||
/// feature.
|
||||
///
|
||||
/// In this implementation, it is both the ring of buffer entries and the actual buffer
|
||||
/// allocations.
|
||||
///
|
||||
/// A BufRing is created through the [`Builder`] and can be registered automatically by the
|
||||
/// builder's `build` step or at a later time by the user. Registration involves informing the
|
||||
/// kernel of the ring's dimensions and its identifier (its buffer group id, which goes by the name
|
||||
/// `bgid`).
|
||||
///
|
||||
/// Multiple buf_rings, here multiple BufRings, can be created and registered. BufRings are
|
||||
/// reference counted to ensure their memory is live while their BufX buffers are live. When a BufX
|
||||
/// buffer is dropped, it releases itself back to the BufRing from which it came allowing it to be
|
||||
/// reused by the kernel.
|
||||
///
|
||||
/// It is perhaps worth pointing out that it is the ring itself that is registered with the kernel,
|
||||
/// not the buffers per se. While a given buf_ring cannot have it size changed dynamically, the
|
||||
/// buffers that are pushed to the ring by userland, and later potentially re-pushed in the ring,
|
||||
/// can change. The buffers can be of different sizes and they could come from different allocation
|
||||
/// blocks. This implementation does not provide that flexibility. Each BufRing comes with its own
|
||||
/// equal length buffer allocation. And when a BufRing buffer, a BufX, is dropped, its id is pushed
|
||||
/// back to the ring.
|
||||
///
|
||||
/// This is the one and only `Provided Buffers` implementation in `tokio_uring` at the moment and
|
||||
/// in this version, is a purely concrete type, with a concrete BufX type for buffers that are
|
||||
/// returned by operations like `recv_provbuf` to the userland application.
|
||||
///
|
||||
/// Aside from the register and unregister steps, there are no syscalls used to pass buffers to the
|
||||
/// kernel. The ring contains a tail memory address that this userland type updates as buffers are
|
||||
/// added to the ring and which the kernel reads when it needs to pull a buffer from the ring. The
|
||||
/// kernel does not have a head pointer address that it updates for the userland. The userland
|
||||
/// (this type), is expected to avoid overwriting the head of the circular ring by keeping track of
|
||||
/// how many buffers were added to the ring and how many have been returned through the CQE
|
||||
/// mechanism. This particular implementation does not track the count because all buffers are
|
||||
/// allocated at the beginning, by the builder, and only its own buffers that came back via a CQE
|
||||
/// are ever added back to the ring, so it should be impossible to overflow the ring.
|
||||
#[derive(Clone, Debug)]
pub struct BufRing {
    // RawBufRing uses cell for fields where necessary.
    // Cloning a BufRing only bumps this refcount; the ring allocation lives
    // until the last clone and the last outstanding BufX are dropped.
    raw: Rc<RawBufRing>,
}
|
||||
|
||||
// Methods the BufX needs.
|
||||
|
||||
impl BufRing {
    /// Capacity of any buffer in this ring (the bid is ignored because all
    /// buffers in a ring share one length).
    pub(crate) fn buf_capacity(&self, _: Bid) -> usize {
        self.raw.buf_capacity_i()
    }

    /// Pointer to the start of the buffer identified by `bid`.
    pub(crate) fn stable_ptr(&self, bid: Bid) -> *const u8 {
        // Will panic if bid is out of range.
        self.raw.stable_ptr_i(bid)
    }

    /// Mutable pointer to the start of the buffer identified by `bid`.
    pub(crate) fn stable_mut_ptr(&mut self, bid: Bid) -> *mut u8 {
        // Safety: self is &mut, we're good.
        unsafe { self.raw.stable_mut_ptr_i(bid) }
    }

    // Release buffer `bid` back to the ring for kernel reuse.
    //
    // # Safety
    //
    // `dropping_bid` should only be called by the buffer's drop function because once called, the
    // buffer may be given back to the kernel for reuse.
    pub(crate) unsafe fn dropping_bid(&self, bid: Bid) {
        self.raw.dropping_bid_i(bid);
    }
}
|
||||
|
||||
// Methods the io operations need.
|
||||
|
||||
impl BufRing {
    /// The buffer group id this ring is identified by.
    pub(crate) fn bgid(&self) -> Bgid {
        self.raw.bgid()
    }

    // Look up the buffer a completed operation wrote into and wrap it in a BufX.
    //
    // # Safety
    //
    // The res and flags values are used to lookup a buffer and set its initialized length.
    // The caller is responsible for these being correct. This is expected to be called
    // when these two values are received from the kernel via a CQE and we rely on the kernel to
    // give us correct information.
    pub(crate) unsafe fn get_buf(&self, res: u32, flags: u32) -> io::Result<Option<BufX>> {
        // The kernel encodes the chosen buffer id in the CQE flags
        let bid = match io_uring::cqueue::buffer_select(flags) {
            Some(bid) => bid,
            None => {
                // Have seen res == 0, flags == 4 with a TCP socket. res == 0 we take to mean the
                // socket is empty so return None to show there is no buffer returned, which should
                // be interpreted to mean there is no more data to read from this file or socket.
                if res == 0 {
                    return Ok(None);
                }

                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "BufRing::get_buf failed as the buffer bit, IORING_CQE_F_BUFFER, was missing from flags, res = {}, flags = {}",
                        res, flags)
                ));
            }
        };

        // For a successful read-like completion, res is the number of bytes written
        let len = res as usize;

        /*
        let flags = flags & !io_uring::sys::IORING_CQE_F_BUFFER; // for tracing flags
        println!(
            "{}:{}: get_buf res({res})=len({len}) flags({:#x})->bid({bid})\n\n",
            file!(),
            line!(),
            flags
        );
        */

        assert!(len <= self.raw.buf_len);

        // TODO maybe later
        // #[cfg(any(debug, feature = "cautious"))]
        // {
        //     let mut debug_bitmap = self.debug_bitmap.borrow_mut();
        //     let m = 1 << (bid % 8);
        //     assert!(debug_bitmap[(bid / 8) as usize] & m == m);
        //     debug_bitmap[(bid / 8) as usize] &= !m;
        // }

        self.raw.metric_getting_another();
        /*
        println!(
            "{}:{}: get_buf cur {}, min {}",
            file!(),
            line!(),
            self.possible_cur.get(),
            self.possible_min.get(),
        );
        */

        // Safety: the len provided to BufX::new is given to us from the kernel.
        Ok(Some(unsafe { BufX::new(self.clone(), bid, len) }))
    }
}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
/// Build the arguments to call build() that returns a [`BufRing`].
///
/// Refer to the methods descriptions for details.
#[allow(dead_code)]
pub struct Builder {
    page_size: usize,    // kernel page size; ring allocation must be page aligned
    bgid: Bgid,          // buffer group id to register under
    ring_entries: u16,   // requested ring entries; rounded to a power of two by build()
    buf_cnt: u16,        // number of buffers; 0 means "use ring_entries"
    buf_len: usize,      // length of each buffer in bytes
    buf_align: usize,    // alignment of the first buffer
    ring_pad: usize,     // padding between end of ring and first buffer
    bufend_align: usize, // alignment of the end of the buffer allocation

    skip_register: bool, // skip automatic registration during build()
}
|
||||
|
||||
#[allow(dead_code)]
impl Builder {
    /// Create a new Builder with the given buffer group ID and defaults.
    ///
    /// The buffer group ID, `bgid`, is the id the kernel's io_uring device uses to identify the
    /// provided buffer pool to use by operations that are posted to the device.
    ///
    /// The user is responsible for picking a bgid that does not conflict with other buffer groups
    /// that have been registered with the same uring interface.
    pub fn new(bgid: Bgid) -> Builder {
        Builder {
            page_size: 4096,
            bgid,
            ring_entries: 128,
            buf_cnt: 0,
            buf_len: 4096,
            buf_align: 0,
            ring_pad: 0,
            bufend_align: 0,
            skip_register: false,
        }
    }

    /// The page size of the kernel. Defaults to 4096.
    ///
    /// The io_uring device requires the BufRing is allocated on the start of a page, i.e. with a
    /// page size alignment.
    ///
    /// The caller should determine the page size, and may want to cache the info if multiple buf
    /// rings are to be created. Crates are available to get this information or the user may want
    /// to call the libc sysconf directly:
    ///
    /// use libc::{_SC_PAGESIZE, sysconf};
    /// let page_size: usize = unsafe { sysconf(_SC_PAGESIZE) as usize };
    pub fn page_size(mut self, page_size: usize) -> Builder {
        self.page_size = page_size;
        self
    }

    /// The number of ring entries to create for the buffer ring.
    ///
    /// This defaults to 128 or the `buf_cnt`, whichever is larger.
    ///
    /// The number will be made a power of 2, and will be the maximum of the ring_entries setting
    /// and the buf_cnt setting. The interface will enforce a maximum of 2^15 (32768) so it can do
    /// rollover calculation.
    ///
    /// Each ring entry is 16 bytes.
    pub fn ring_entries(mut self, ring_entries: u16) -> Builder {
        self.ring_entries = ring_entries;
        self
    }

    /// The number of buffers to allocate. If left zero, the ring_entries value will be used and
    /// that value defaults to 128.
    pub fn buf_cnt(mut self, buf_cnt: u16) -> Builder {
        self.buf_cnt = buf_cnt;
        self
    }

    /// The length of each allocated buffer. Defaults to 4096.
    ///
    /// Non-alignment values are possible and `buf_align` can be used to allocate each buffer on
    /// an alignment buffer, even if the buffer length is not desired to equal the alignment.
    pub fn buf_len(mut self, buf_len: usize) -> Builder {
        self.buf_len = buf_len;
        self
    }

    /// The alignment of the first buffer allocated.
    ///
    /// Generally not needed.
    ///
    /// The buffers are allocated right after the ring unless `ring_pad` is used and generally the
    /// buffers are allocated contiguous to one another unless the `buf_len` is set to something
    /// different.
    pub fn buf_align(mut self, buf_align: usize) -> Builder {
        self.buf_align = buf_align;
        self
    }

    /// Pad to place after ring to ensure separation between rings and first buffer.
    ///
    /// Generally not needed but may be useful if the ring's end and the buffers' start are to have
    /// some separation, perhaps for cacheline reasons.
    pub fn ring_pad(mut self, ring_pad: usize) -> Builder {
        self.ring_pad = ring_pad;
        self
    }

    /// The alignment of the end of the buffer allocated. To keep other things out of a cache line
    /// or out of a page, if that's desired.
    pub fn bufend_align(mut self, bufend_align: usize) -> Builder {
        self.bufend_align = bufend_align;
        self
    }

    /// Skip automatic registration. The caller can manually invoke the buf_ring.register()
    /// function later. Regardless, the unregister() method will be called automatically when the
    /// BufRing goes out of scope if the caller hadn't manually called buf_ring.unregister()
    /// already.
    pub fn skip_auto_register(mut self, skip: bool) -> Builder {
        self.skip_register = skip;
        self
    }

    /// Return a BufRing, having computed the layout for the single aligned allocation
    /// of both the buffer ring elements and the buffers themselves.
    ///
    /// If auto_register was left enabled, register the BufRing with the driver.
    pub fn build(&self) -> io::Result<BufRing> {
        // Work on a copy so the builder itself stays reusable
        let mut b: Builder = *self;

        // Two cases where both buf_cnt and ring_entries are set to the max of the two.
        if b.buf_cnt == 0 || b.ring_entries < b.buf_cnt {
            let max = std::cmp::max(b.ring_entries, b.buf_cnt);
            b.buf_cnt = max;
            b.ring_entries = max;
        }

        // Don't allow the next_power_of_two calculation to be done if already larger than 2^15
        // because 2^16 reads back as 0 in a u16. And the interface doesn't allow for ring_entries
        // larger than 2^15 anyway, so this is a good place to catch it. Here we return a unique
        // error that is more descriptive than the InvalidArg that would come from the interface.
        if b.ring_entries > (1 << 15) {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "ring_entries exceeded 32768",
            ));
        }

        // Requirement of the interface is the ring entries is a power of two, making its and our
        // mask calculation trivial.
        b.ring_entries = b.ring_entries.next_power_of_two();

        Ok(BufRing {
            raw: Rc::new(RawBufRing::new(NewArgs {
                page_size: b.page_size,
                bgid: b.bgid,
                ring_entries: b.ring_entries,
                buf_cnt: b.buf_cnt,
                buf_len: b.buf_len,
                buf_align: b.buf_align,
                ring_pad: b.ring_pad,
                bufend_align: b.bufend_align,
                auto_register: !b.skip_register,
            })?),
        })
    }
}
|
||||
|
||||
// Trivial helper struct for this module: validated arguments handed from
// Builder::build to RawBufRing::new.
struct NewArgs {
    page_size: usize,    // kernel page size; ring allocation must be page aligned
    bgid: Bgid,          // buffer group id to register under
    ring_entries: u16,   // power of two, <= 2^15 (enforced by Builder::build)
    buf_cnt: u16,        // number of buffers, <= ring_entries
    buf_len: usize,      // length of each buffer in bytes
    buf_align: usize,    // alignment of the first buffer
    ring_pad: usize,     // padding between end of ring and first buffer
    bufend_align: usize, // alignment of the end of the buffer allocation
    auto_register: bool, // register with the uring device at construction time
}
|
||||
|
||||
/// Reference-counted interior of a [`BufRing`]: the registered ring entries
/// plus the single backing buffer allocation, with interior mutability where
/// bookkeeping requires it.
#[derive(Debug)]
struct RawBufRing {
    bgid: Bgid,

    // Keep mask rather than ring size because mask is used often, ring size not.
    //ring_entries: u16, // Invariants: > 0, power of 2, max 2^15 (32768).
    ring_entries_mask: u16, // Invariant one less than ring_entries which is > 0, power of 2, max 2^15 (32768).

    buf_cnt: u16,   // Invariants: > 0, <= ring_entries.
    buf_len: usize, // Invariant: > 0.
    layout: std::alloc::Layout,
    ring_addr: *const types::BufRingEntry, // Invariant: constant.
    buffers_addr: *mut u8,                 // Invariant: constant.
    local_tail: Cell<u16>,
    tail_addr: *const AtomicU16,
    registered: Cell<bool>,

    // The first `possible` field is a best effort at tracking the current buffer pool usage and
    // from that, tracking the lowest level that has been reached. The two are an attempt at
    // letting the user check the sizing needs of their buf_ring pool.
    //
    // We don't really know how deep the uring device has gone into the pool because we never see
    // its head value and it can be taking buffers from the ring, in-flight, while we add buffers
    // back to the ring. All we know is when a CQE arrives and a buffer lookup is performed, a
    // buffer has already been taken from the pool, and when the buffer is dropped, we add it back
    // to the ring and it is about to be considered part of the pool again.
    possible_cur: Cell<u16>,
    possible_min: Cell<u16>,
    //
    // TODO maybe later
    // #[cfg(any(debug, feature = "cautious"))]
    // debug_bitmap: RefCell<std::vec::Vec<u8>>,
}
|
||||
|
||||
impl RawBufRing {
|
||||
fn new(new_args: NewArgs) -> io::Result<RawBufRing> {
|
||||
#[allow(non_upper_case_globals)]
|
||||
const trace: bool = false;
|
||||
|
||||
let NewArgs {
|
||||
page_size,
|
||||
bgid,
|
||||
ring_entries,
|
||||
buf_cnt,
|
||||
buf_len,
|
||||
buf_align,
|
||||
ring_pad,
|
||||
bufend_align,
|
||||
auto_register,
|
||||
} = new_args;
|
||||
|
||||
// Check that none of the important args are zero and the ring_entries is at least large
|
||||
// enough to hold all the buffers and that ring_entries is a power of 2.
|
||||
|
||||
if (buf_cnt == 0)
|
||||
|| (buf_cnt > ring_entries)
|
||||
|| (buf_len == 0)
|
||||
|| ((ring_entries & (ring_entries - 1)) != 0)
|
||||
{
|
||||
return Err(io::Error::from(io::ErrorKind::InvalidInput));
|
||||
}
|
||||
|
||||
// entry_size is 16 bytes.
|
||||
let entry_size = std::mem::size_of::<types::BufRingEntry>();
|
||||
let mut ring_size = entry_size * (ring_entries as usize);
|
||||
if trace {
|
||||
println!(
|
||||
"{}:{}: entry_size {} * ring_entries {} = ring_size {} {:#x}",
|
||||
file!(),
|
||||
line!(),
|
||||
entry_size,
|
||||
ring_entries,
|
||||
ring_size,
|
||||
ring_size,
|
||||
);
|
||||
}
|
||||
|
||||
ring_size += ring_pad;
|
||||
|
||||
if trace {
|
||||
println!(
|
||||
"{}:{}: after +ring_pad {} ring_size {} {:#x}",
|
||||
file!(),
|
||||
line!(),
|
||||
ring_pad,
|
||||
ring_size,
|
||||
ring_size,
|
||||
);
|
||||
}
|
||||
|
||||
if buf_align > 0 {
|
||||
let buf_align = buf_align.next_power_of_two();
|
||||
ring_size = (ring_size + (buf_align - 1)) & !(buf_align - 1);
|
||||
if trace {
|
||||
println!(
|
||||
"{}:{}: after buf_align ring_size {} {:#x}",
|
||||
file!(),
|
||||
line!(),
|
||||
ring_size,
|
||||
ring_size,
|
||||
);
|
||||
}
|
||||
}
|
||||
let buf_size = buf_len * (buf_cnt as usize);
|
||||
assert!(ring_size != 0);
|
||||
assert!(buf_size != 0);
|
||||
let mut tot_size: usize = ring_size + buf_size;
|
||||
if trace {
|
||||
println!(
|
||||
"{}:{}: ring_size {} {:#x} + buf_size {} {:#x} = tot_size {} {:#x}",
|
||||
file!(),
|
||||
line!(),
|
||||
ring_size,
|
||||
ring_size,
|
||||
buf_size,
|
||||
buf_size,
|
||||
tot_size,
|
||||
tot_size
|
||||
);
|
||||
}
|
||||
if bufend_align > 0 {
|
||||
// for example, if bufend_align is 4096, would make total size a multiple of pages
|
||||
let bufend_align = bufend_align.next_power_of_two();
|
||||
tot_size = (tot_size + (bufend_align - 1)) & !(bufend_align - 1);
|
||||
if trace {
|
||||
println!(
|
||||
"{}:{}: after bufend_align tot_size {} {:#x}",
|
||||
file!(),
|
||||
line!(),
|
||||
tot_size,
|
||||
tot_size,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let align: usize = page_size; // alignment must be at least the page size
|
||||
let align = align.next_power_of_two();
|
||||
let layout = std::alloc::Layout::from_size_align(tot_size, align).unwrap();
|
||||
|
||||
assert!(layout.size() >= ring_size);
|
||||
// Safety: we are assured layout has nonzero size, we passed the assert just above.
|
||||
let ring_addr: *mut u8 = unsafe { std::alloc::alloc_zeroed(layout) };
|
||||
|
||||
// Buffers starts after the ring_size.
|
||||
// Safety: are we assured the address and the offset are in bounds because the ring_addr is
|
||||
// the value we got from the alloc call, and the layout.size was shown to be at least as
|
||||
// large as the ring_size.
|
||||
let buffers_addr: *mut u8 = unsafe { ring_addr.add(ring_size) };
|
||||
if trace {
|
||||
println!(
|
||||
"{}:{}: ring_addr {} {:#x}, layout: size {} align {}",
|
||||
file!(),
|
||||
line!(),
|
||||
ring_addr as u64,
|
||||
ring_addr as u64,
|
||||
layout.size(),
|
||||
layout.align()
|
||||
);
|
||||
println!(
|
||||
"{}:{}: buffers_addr {} {:#x}",
|
||||
file!(),
|
||||
line!(),
|
||||
buffers_addr as u64,
|
||||
buffers_addr as u64,
|
||||
);
|
||||
}
|
||||
|
||||
let ring_addr: *const types::BufRingEntry = ring_addr as _;
|
||||
|
||||
// Safety: the ring_addr passed into tail is the start of the ring. It is both the start of
|
||||
// the ring and the first entry in the ring.
|
||||
let tail_addr = unsafe { types::BufRingEntry::tail(ring_addr) } as *const AtomicU16;
|
||||
|
||||
let ring_entries_mask = ring_entries - 1;
|
||||
assert!((ring_entries & ring_entries_mask) == 0);
|
||||
|
||||
let buf_ring = RawBufRing {
|
||||
bgid,
|
||||
ring_entries_mask,
|
||||
buf_cnt,
|
||||
buf_len,
|
||||
layout,
|
||||
ring_addr,
|
||||
buffers_addr,
|
||||
local_tail: Cell::new(0),
|
||||
tail_addr,
|
||||
registered: Cell::new(false),
|
||||
possible_cur: Cell::new(0),
|
||||
possible_min: Cell::new(buf_cnt),
|
||||
//
|
||||
// TODO maybe later
|
||||
// #[cfg(any(debug, feature = "cautious"))]
|
||||
// debug_bitmap: RefCell::new(std::vec![0; ((buf_cnt+7)/8) as usize]),
|
||||
};
|
||||
|
||||
// Question had come up: where should the initial buffers be added to the ring?
|
||||
// Here when the ring is created, even before it is registered potentially?
|
||||
// Or after registration?
|
||||
//
|
||||
// For this type, BufRing, we are adding the buffers to the ring as the last part of creating the BufRing,
|
||||
// even before registration is optionally performed.
|
||||
//
|
||||
// We've seen the registration to be successful, even when the ring starts off empty.
|
||||
|
||||
// Add the buffers here where the ring is created.
|
||||
|
||||
for bid in 0..buf_cnt {
|
||||
buf_ring.buf_ring_add(bid);
|
||||
}
|
||||
buf_ring.buf_ring_sync();
|
||||
|
||||
// The default is to register the buffer ring right here. There is usually no reason the
|
||||
// caller should want to register it some time later.
|
||||
//
|
||||
// Perhaps the caller wants to allocate the buffer ring before the CONTEXT driver is in
|
||||
// place - that would be a reason to delay the register call until later.
|
||||
|
||||
if auto_register {
|
||||
buf_ring.register()?;
|
||||
}
|
||||
Ok(buf_ring)
|
||||
}
|
||||
|
||||
/// Register the buffer ring with the kernel.
/// Normally this is done automatically when building a BufRing.
///
/// This method must be called on a thread whose thread-local CURRENT_RING
/// has been set, or CurrentRing::with will panic.
/// The registration persists until revoked by the [`unregister`] method.
/// Dropping the instance this method has been called on does revoke
/// the registration and deallocate the buffer space.
///
/// [`unregister`]: Self::unregister
///
/// # Errors
///
/// If a `Provided Buffers` group with the same `bgid` is already registered, the function
/// returns an error.
fn register(&self) -> io::Result<()> {
    let bgid = self.bgid;
    //println!("{}:{}: register bgid {bgid}", file!(), line!());

    // Future: move to separate public function so other buf_ring implementations
    // can register, and unregister, the same way.

    let res = CurrentRing::with(|ring| unsafe {
        ring.submitter()
            .register_buf_ring(self.ring_addr as _, self.ring_entries(), bgid)
    });
    // println!("{}:{}: res {:?}", file!(), line!(), res);

    if let Err(e) = res {
        // Interpret well-known raw errno values to give actionable hints.
        match e.raw_os_error() {
            // 22 == EINVAL on Linux.
            Some(22) => {
                // using buf_ring requires kernel 5.19 or greater.
                // TODO turn these eprintln into new, more expressive error being returned.
                // TODO what convention should we follow in this crate for adding information
                // onto an error?
                eprintln!(
                    "buf_ring.register returned {e}, most likely indicating this kernel is not 5.19+",
                );
            }
            // 17 == EEXIST on Linux.
            Some(17) => {
                // Registering a duplicate bgid is not allowed. There is an `unregister`
                // operations that can remove the first.
                eprintln!(
                    "buf_ring.register returned `{e}`, indicating the attempted buffer group id {bgid} was already registered",
                );
            }
            _ => {
                eprintln!("buf_ring.register returned `{e}` for group id {bgid}");
            }
        }
        return Err(e);
    };

    // Remember success so Drop / unregister know a registration is active.
    self.registered.set(true);

    res
}
|
||||
|
||||
/// Unregister the buffer ring from the io_uring.
|
||||
/// Normally this is done automatically when the BufRing goes out of scope.
|
||||
///
|
||||
/// Warning: requires the CONTEXT driver is already in place or will panic.
|
||||
fn unregister(&self) -> io::Result<()> {
|
||||
// If not registered, make this a no-op.
|
||||
if !self.registered.get() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.registered.set(false);
|
||||
|
||||
let bgid = self.bgid;
|
||||
|
||||
CurrentRing::with(|ring| ring.submitter().unregister_buf_ring(bgid))
|
||||
}
|
||||
|
||||
/// Returns the buffer group id this ring was created with.
#[inline]
fn bgid(&self) -> Bgid {
    self.bgid
}
|
||||
|
||||
fn metric_getting_another(&self) {
|
||||
self.possible_cur.set(self.possible_cur.get() - 1);
|
||||
self.possible_min.set(std::cmp::min(
|
||||
self.possible_min.get(),
|
||||
self.possible_cur.get(),
|
||||
));
|
||||
}
|
||||
|
||||
// Return buffer `bid` to the ring and immediately publish the new tail
// so the kernel can reuse it.
//
// # Safety
//
// Dropping a duplicate bid is likely to cause undefined behavior
// as the kernel uses the same buffer for different data concurrently.
unsafe fn dropping_bid_i(&self, bid: Bid) {
    self.buf_ring_add(bid);
    self.buf_ring_sync();
}
|
||||
|
||||
// Capacity of a single buffer in bytes (every buffer has the same length).
#[inline]
fn buf_capacity_i(&self) -> usize {
    self.buf_len as _
}
|
||||
|
||||
// Returns a read-only pointer to the start of buffer `bid` within the
// buffers region of the allocation. The address is stable for self's lifetime.
#[inline]
// # Panic
//
// This function will panic if given a bid that is not within the valid range 0..self.buf_cnt.
fn stable_ptr_i(&self, bid: Bid) -> *const u8 {
    assert!(bid < self.buf_cnt);
    let offset: usize = self.buf_len * (bid as usize);
    // Safety: buffers_addr is an u8 pointer and was part of an allocation large enough to hold
    // buf_cnt number of buf_len buffers. buffers_addr, buf_cnt and buf_len are treated as
    // constants and bid was just asserted to be less than buf_cnt.
    unsafe { self.buffers_addr.add(offset) }
}
|
||||
|
||||
// Mutable counterpart of `stable_ptr_i`: pointer to the start of buffer `bid`.
//
// # Safety
//
// This may only be called by an owned or &mut object.
//
// # Panic
// This will panic if bid is out of range.
#[inline]
unsafe fn stable_mut_ptr_i(&self, bid: Bid) -> *mut u8 {
    assert!(bid < self.buf_cnt);
    let offset: usize = self.buf_len * (bid as usize);
    // Safety: buffers_addr is an u8 pointer and was part of an allocation large enough to hold
    // buf_cnt number of buf_len buffers. buffers_addr, buf_cnt and buf_len are treated as
    // constants and bid was just asserted to be less than buf_cnt.
    self.buffers_addr.add(offset)
}
|
||||
|
||||
// Number of ring entries, reconstructed from the stored mask
// (mask invariant: ring_entries - 1, with ring_entries a power of two).
#[inline]
fn ring_entries(&self) -> u16 {
    self.ring_entries_mask + 1
}
|
||||
|
||||
// Mask used to wrap a tail value into a ring index.
#[inline]
fn mask(&self) -> u16 {
    self.ring_entries_mask
}
|
||||
|
||||
// Writes to a ring entry and updates our local copy of the tail.
//
// Adds the buffer known by its buffer id to the buffer ring. The buffer's address and length
// are known given its bid.
//
// This does not sync the new tail value. The caller should use `buf_ring_sync` for that.
//
// Panics if the bid is out of range.
fn buf_ring_add(&self, bid: Bid) {
    // Compute address of current tail position, increment the local copy of the tail. Then
    // write the buffer's address, length and bid into the current tail entry.

    let cur_tail = self.local_tail.get();
    self.local_tail.set(cur_tail.wrapping_add(1));
    let ring_idx = cur_tail & self.mask();

    let ring_addr = self.ring_addr as *mut types::BufRingEntry;

    // Safety:
    // 1. the pointer address (ring_addr), is set and const at self creation time,
    // and points to a block of memory at least as large as the number of ring_entries,
    // 2. the mask used to create ring_idx is one less than
    // the number of ring_entries, and ring_entries was tested to be a power of two,
    // So the address gotten by adding ring_idx entries to ring_addr is guaranteed to
    // be a valid address of a ring entry.
    let entry = unsafe { &mut *ring_addr.add(ring_idx as usize) };

    entry.set_addr(self.stable_ptr_i(bid) as _);
    entry.set_len(self.buf_len as _);
    entry.set_bid(bid);

    // Update accounting: one more buffer is (about to be) part of the pool.
    self.possible_cur.set(self.possible_cur.get() + 1);

    // TODO maybe later
    // #[cfg(any(debug, feature = "cautious"))]
    // {
    //     let mut debug_bitmap = self.debug_bitmap.borrow_mut();
    //     let m = 1 << (bid % 8);
    //     assert!(debug_bitmap[(bid / 8) as usize] & m == 0);
    //     debug_bitmap[(bid / 8) as usize] |= m;
    // }
}
|
||||
|
||||
// Make 'count' new buffers visible to the kernel. Called after
// io_uring_buf_ring_add() has been called 'count' times to fill in new
// buffers.
//
// The release ordering guarantees the entry writes done by buf_ring_add
// are visible before the kernel observes the new tail.
#[inline]
fn buf_ring_sync(&self) {
    // Safety: dereferencing this raw pointer is safe. The tail_addr was computed once at init
    // to refer to the tail address in the ring and is held const for self's lifetime.
    unsafe {
        (*self.tail_addr).store(self.local_tail.get(), atomic::Ordering::Release);
    }
    // The liburing code did io_uring_smp_store_release(&br.tail, local_tail);
}
|
||||
|
||||
// Return the possible_min buffer pool size: the lowest estimated pool
// level observed so far (see the `possible_*` field comments on the struct).
#[allow(dead_code)]
fn possible_min(&self) -> u16 {
    self.possible_min.get()
}
|
||||
|
||||
// Return the possible_min buffer pool size and reset to allow fresh counting going forward.
|
||||
#[allow(dead_code)]
|
||||
fn possible_min_and_reset(&self) -> u16 {
|
||||
let res = self.possible_min.get();
|
||||
self.possible_min.set(self.buf_cnt);
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for RawBufRing {
    fn drop(&mut self) {
        // Best effort: revoke the kernel registration before freeing the
        // memory it refers to. The result is deliberately ignored; there is
        // nothing useful to do with an error during drop.
        if self.registered.get() {
            _ = self.unregister();
        }
        // Safety: the ptr and layout are treated as constant, and ptr (ring_addr) was assigned by
        // a call to std::alloc::alloc_zeroed using the same layout.
        unsafe { std::alloc::dealloc(self.ring_addr as *mut u8, self.layout) };
    }
}
|
||||
548
crates/udp/src/workers/socket/uring/mod.rs
Normal file
548
crates/udp/src/workers/socket/uring/mod.rs
Normal file
|
|
@ -0,0 +1,548 @@
|
|||
mod buf_ring;
|
||||
mod recv_helper;
|
||||
mod send_buffers;
|
||||
|
||||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
use std::net::UdpSocket;
|
||||
use std::ops::DerefMut;
|
||||
use std::os::fd::AsRawFd;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_common::access_list::AccessListCache;
|
||||
use aquatic_common::ServerStartInstant;
|
||||
use crossbeam_channel::Receiver;
|
||||
use io_uring::opcode::Timeout;
|
||||
use io_uring::types::{Fixed, Timespec};
|
||||
use io_uring::{IoUring, Probe};
|
||||
|
||||
use aquatic_common::{
|
||||
access_list::create_access_list_cache, privileges::PrivilegeDropper, CanonicalSocketAddr,
|
||||
PanicSentinel, ValidUntil,
|
||||
};
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
use self::buf_ring::BufRing;
|
||||
use self::recv_helper::RecvHelper;
|
||||
use self::send_buffers::{ResponseType, SendBuffers};
|
||||
|
||||
use super::storage::PendingScrapeResponseSlab;
|
||||
use super::validator::ConnectionValidator;
|
||||
use super::{create_socket, EXTRA_PACKET_SIZE_IPV4, EXTRA_PACKET_SIZE_IPV6};
|
||||
|
||||
/// Size of each request buffer
///
/// Enough for scrape request with 20 info hashes
const REQUEST_BUF_LEN: usize = 256;

/// Size of each response buffer
///
/// Enough for:
/// - IPv6 announce response with 112 peers
/// - scrape response for 170 info hashes
const RESPONSE_BUF_LEN: usize = 2048;

// Reserved user_data values for non-send CQEs. Any other user_data value is
// interpreted as a send buffer index (see SocketWorker::handle_cqe).
const USER_DATA_RECV: u64 = u64::MAX;
const USER_DATA_PULSE_TIMEOUT: u64 = u64::MAX - 1;
const USER_DATA_CLEANING_TIMEOUT: u64 = u64::MAX - 2;

// Index of the UDP socket in the ring's registered-files table
// (registered in SocketWorker::run via register_files).
const SOCKET_IDENTIFIER: Fixed = Fixed(0);

thread_local! {
    /// Store IoUring instance here so that it can be accessed in BufRing::drop
    pub static CURRENT_RING: CurrentRing = CurrentRing(RefCell::new(None));
}
|
||||
|
||||
pub struct CurrentRing(RefCell<Option<IoUring>>);
|
||||
|
||||
impl CurrentRing {
|
||||
fn with<F, T>(mut f: F) -> T
|
||||
where
|
||||
F: FnMut(&mut IoUring) -> T,
|
||||
{
|
||||
CURRENT_RING.with(|r| {
|
||||
let mut opt_ring = r.0.borrow_mut();
|
||||
|
||||
f(Option::as_mut(opt_ring.deref_mut()).expect("IoUring not set"))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Per-thread io_uring-based UDP socket worker: receives requests, forwards
// them to swarm workers, and sends back responses.
pub struct SocketWorker {
    config: Config,
    shared_state: State,
    // Channel towards the swarm workers.
    request_sender: ConnectedRequestSender,
    // Channel carrying responses back from the swarm workers.
    response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
    access_list_cache: AccessListCache,
    // Issues and checks connection ids.
    validator: ConnectionValidator,
    server_start_instant: ServerStartInstant,
    // Kept alive for the worker's lifetime; its fd is registered with the
    // ring and referenced via SOCKET_IDENTIFIER rather than used directly.
    #[allow(dead_code)]
    socket: UdpSocket,
    // Partially-assembled scrape responses awaiting all swarm worker parts.
    pending_scrape_responses: PendingScrapeResponseSlab,
    // Kernel-provided buffers for incoming datagrams.
    buf_ring: BufRing,
    // Buffers owned by us for outgoing responses.
    send_buffers: SendBuffers,
    recv_helper: RecvHelper,
    // Responses generated locally (connect/error) or deferred due to lack of
    // send buffers; drained before swarm worker responses each iteration.
    local_responses: VecDeque<(Response, CanonicalSocketAddr)>,
    // SQEs to (re)submit at the top of the next loop iteration.
    resubmittable_sqe_buf: Vec<io_uring::squeue::Entry>,
    recv_sqe: io_uring::squeue::Entry,
    pulse_timeout_sqe: io_uring::squeue::Entry,
    cleaning_timeout_sqe: io_uring::squeue::Entry,
    pending_scrape_valid_until: ValidUntil,
}
|
||||
|
||||
impl SocketWorker {
|
||||
// Entry point: set up socket, ring, buffers and timeout SQEs, then run the
// event loop on this thread forever. Setup order matters: the IoUring must
// be in CURRENT_RING before the BufRing is built, since BufRing registration
// and drop go through the thread-local.
pub fn run(
    _sentinel: PanicSentinel,
    shared_state: State,
    config: Config,
    validator: ConnectionValidator,
    server_start_instant: ServerStartInstant,
    request_sender: ConnectedRequestSender,
    response_receiver: Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
    priv_dropper: PrivilegeDropper,
) {
    let ring_entries = config.network.ring_size.next_power_of_two();
    // Try to fill up the ring with send requests
    let send_buffer_entries = ring_entries;

    let socket = create_socket(&config, priv_dropper).expect("create socket");
    let access_list_cache = create_access_list_cache(&shared_state.access_list);
    let send_buffers = SendBuffers::new(&config, send_buffer_entries as usize);
    let recv_helper = RecvHelper::new(&config);

    let ring = IoUring::builder()
        .setup_coop_taskrun()
        .setup_single_issuer()
        .setup_submit_all()
        .build(ring_entries.into())
        .unwrap();

    // Register the socket fd; SQEs refer to it as SOCKET_IDENTIFIER (Fixed(0)).
    ring.submitter()
        .register_files(&[socket.as_raw_fd()])
        .unwrap();

    // Store ring in thread local storage before creating BufRing
    CURRENT_RING.with(|r| *r.0.borrow_mut() = Some(ring));

    let buf_ring = buf_ring::Builder::new(0)
        .ring_entries(ring_entries)
        .buf_len(REQUEST_BUF_LEN)
        .build()
        .unwrap();

    let recv_sqe = recv_helper.create_entry(buf_ring.bgid().try_into().unwrap());

    // This timeout enables regular updates of pending_scrape_valid_until
    // and wakes the main loop to send any pending responses in the case
    // of no incoming requests
    let pulse_timeout_sqe = {
        // Leaked on purpose: the kernel reads the timespec while the timeout
        // SQE is in flight, and this SQE is resubmitted for the whole
        // lifetime of the worker.
        let timespec_ptr = Box::into_raw(Box::new(Timespec::new().sec(1))) as *const _;

        Timeout::new(timespec_ptr)
            .build()
            .user_data(USER_DATA_PULSE_TIMEOUT)
    };

    // Periodic cleaning of stale pending scrape responses. The timespec is
    // leaked for the same reason as above.
    let cleaning_timeout_sqe = {
        let timespec_ptr = Box::into_raw(Box::new(
            Timespec::new().sec(config.cleaning.pending_scrape_cleaning_interval),
        )) as *const _;

        Timeout::new(timespec_ptr)
            .build()
            .user_data(USER_DATA_CLEANING_TIMEOUT)
    };

    // All three recurring SQEs are submitted on the first loop iteration.
    let resubmittable_sqe_buf = vec![
        recv_sqe.clone(),
        pulse_timeout_sqe.clone(),
        cleaning_timeout_sqe.clone(),
    ];

    let pending_scrape_valid_until =
        ValidUntil::new(server_start_instant, config.cleaning.max_pending_scrape_age);

    let mut worker = Self {
        config,
        shared_state,
        validator,
        server_start_instant,
        request_sender,
        response_receiver,
        access_list_cache,
        pending_scrape_responses: Default::default(),
        send_buffers,
        recv_helper,
        local_responses: Default::default(),
        buf_ring,
        recv_sqe,
        pulse_timeout_sqe,
        cleaning_timeout_sqe,
        resubmittable_sqe_buf,
        socket,
        pending_scrape_valid_until,
    };

    // Borrow the ring back out of the thread-local for the event loop.
    CurrentRing::with(|ring| worker.run_inner(ring));
}
|
||||
|
||||
// Event loop: each iteration resubmits recurring SQEs, fills remaining
// submission-queue space with response sends (local responses first, then
// swarm worker responses), submits, and drains the completion queue.
// Never returns.
fn run_inner(&mut self, ring: &mut IoUring) {
    loop {
        for sqe in self.resubmittable_sqe_buf.drain(..) {
            // Safety relies on the SQE's referenced data (timespecs, msghdrs,
            // buffers) outliving the submission; all are worker-lifetime.
            unsafe { ring.submission().push(&sqe).unwrap() };
        }

        // Remaining space in the submission queue; bounds both send loops
        // below, so `sq_space - num_send_added` cannot underflow.
        let sq_space = {
            let sq = ring.submission();

            sq.capacity() - sq.len()
        };

        let mut num_send_added = 0;

        // Enqueue local responses
        for _ in 0..sq_space {
            if let Some((response, addr)) = self.local_responses.pop_front() {
                match self.send_buffers.prepare_entry(&response, addr) {
                    Ok(entry) => {
                        unsafe { ring.submission().push(&entry).unwrap() };

                        num_send_added += 1;
                    }
                    Err(send_buffers::Error::NoBuffers) => {
                        // Out of send buffers: put the response back and
                        // retry next iteration after sends complete.
                        self.local_responses.push_front((response, addr));

                        break;
                    }
                    Err(send_buffers::Error::SerializationFailed(err)) => {
                        ::log::error!("Failed serializing response: {:#}", err);
                    }
                }
            } else {
                break;
            }
        }

        // Enqueue swarm worker responses
        for _ in 0..(sq_space - num_send_added) {
            if let Some((response, addr)) = self.get_next_swarm_response() {
                match self.send_buffers.prepare_entry(&response, addr) {
                    Ok(entry) => {
                        unsafe { ring.submission().push(&entry).unwrap() };

                        num_send_added += 1;
                    }
                    Err(send_buffers::Error::NoBuffers) => {
                        // Defer to local_responses so it is retried with
                        // priority next iteration.
                        self.local_responses.push_back((response, addr));

                        break;
                    }
                    Err(send_buffers::Error::SerializationFailed(err)) => {
                        ::log::error!("Failed serializing response: {:#}", err);
                    }
                }
            } else {
                break;
            }
        }

        // Wait for all sendmsg entries to complete. If none were added,
        // wait for at least one recvmsg or timeout in order to avoid
        // busy-polling if there is no incoming data.
        ring.submitter()
            .submit_and_wait(num_send_added.max(1))
            .unwrap();

        for cqe in ring.completion() {
            self.handle_cqe(cqe);
        }

        self.send_buffers.reset_likely_next_free_index();
    }
}
|
||||
|
||||
// Dispatch one completion entry by its user_data: the three reserved
// sentinel values mark recv / pulse timeout / cleaning timeout; any other
// value is the index of the send buffer whose sendmsg completed.
fn handle_cqe(&mut self, cqe: io_uring::cqueue::Entry) {
    match cqe.user_data() {
        USER_DATA_RECV => {
            self.handle_recv_cqe(&cqe);

            // Multishot recv stays armed while CQE_F_MORE is set; once it
            // isn't, the recv SQE must be resubmitted.
            if !io_uring::cqueue::more(cqe.flags()) {
                self.resubmittable_sqe_buf.push(self.recv_sqe.clone());
            }
        }
        USER_DATA_PULSE_TIMEOUT => {
            // Refresh the deadline attached to newly created pending
            // scrape responses, then rearm the timeout.
            self.pending_scrape_valid_until = ValidUntil::new(
                self.server_start_instant,
                self.config.cleaning.max_pending_scrape_age,
            );

            ::log::info!(
                "pending responses: {} local, {} swarm",
                self.local_responses.len(),
                self.response_receiver.len()
            );

            self.resubmittable_sqe_buf
                .push(self.pulse_timeout_sqe.clone());
        }
        USER_DATA_CLEANING_TIMEOUT => {
            // Drop stale pending scrape responses, then rearm the timeout.
            self.pending_scrape_responses
                .clean(self.server_start_instant.seconds_elapsed());

            self.resubmittable_sqe_buf
                .push(self.cleaning_timeout_sqe.clone());
        }
        send_buffer_index => {
            let result = cqe.result();

            // Negative result is a negated errno from sendmsg.
            if result < 0 {
                ::log::error!(
                    "Couldn't send response: {:#}",
                    ::std::io::Error::from_raw_os_error(-result)
                );
            } else if self.config.statistics.active() {
                let send_buffer_index = send_buffer_index as usize;

                let (response_type, receiver_is_ipv4) =
                    self.send_buffers.response_type_and_ipv4(send_buffer_index);

                let (statistics, extra_bytes) = if receiver_is_ipv4 {
                    (&self.shared_state.statistics_ipv4, EXTRA_PACKET_SIZE_IPV4)
                } else {
                    (&self.shared_state.statistics_ipv6, EXTRA_PACKET_SIZE_IPV6)
                };

                // Positive result is the number of payload bytes sent;
                // extra_bytes approximates header overhead.
                statistics
                    .bytes_sent
                    .fetch_add(result as usize + extra_bytes, Ordering::Relaxed);

                let response_counter = match response_type {
                    ResponseType::Connect => &statistics.responses_sent_connect,
                    ResponseType::Announce => &statistics.responses_sent_announce,
                    ResponseType::Scrape => &statistics.responses_sent_scrape,
                    ResponseType::Error => &statistics.responses_sent_error,
                };

                response_counter.fetch_add(1, Ordering::Relaxed);
            }

            // Safety: OK because cqe using buffer has been returned and
            // contents will no longer be accessed by kernel
            unsafe {
                self.send_buffers
                    .mark_buffer_as_free(send_buffer_index as usize);
            }
        }
    }
}
|
||||
|
||||
// Handle one multishot-recv completion: fetch the kernel-provided buffer,
// parse the datagram into a request (dispatching it on success, or queueing
// an error response for sendable parse failures), and update statistics.
fn handle_recv_cqe(&mut self, cqe: &io_uring::cqueue::Entry) {
    let result = cqe.result();

    // Negative result is a negated errno from the recv operation.
    if result < 0 {
        if -result == libc::ENOBUFS {
            ::log::info!("recv failed due to lack of buffers. If increasing ring size doesn't help, get faster hardware");
        } else {
            ::log::warn!(
                "recv failed: {:#}",
                ::std::io::Error::from_raw_os_error(-result)
            );
        }

        return;
    }

    // Safety: buffer id comes from this CQE's flags; the kernel is done
    // with the buffer until we return it to the ring.
    let buffer = unsafe {
        match self.buf_ring.get_buf(result as u32, cqe.flags()) {
            Ok(Some(buffer)) => buffer,
            Ok(None) => {
                ::log::error!("Couldn't get recv buffer");

                return;
            }
            Err(err) => {
                ::log::error!("Couldn't get recv buffer: {:#}", err);

                return;
            }
        }
    };

    let buffer = buffer.as_slice();

    let addr = match self.recv_helper.parse(buffer) {
        Ok((request, addr)) => {
            self.handle_request(request, addr);

            addr
        }
        Err(self::recv_helper::Error::RequestParseError(err, addr)) => {
            match err {
                // A sendable parse error carries ids that let us answer
                // with an ErrorResponse - but only for valid connections.
                RequestParseError::Sendable {
                    connection_id,
                    transaction_id,
                    err,
                } => {
                    ::log::debug!("Couldn't parse request from {:?}: {}", addr, err);

                    if self.validator.connection_id_valid(addr, connection_id) {
                        let response = ErrorResponse {
                            transaction_id,
                            message: err.right_or("Parse error").into(),
                        };

                        self.local_responses.push_back((response.into(), addr));
                    }
                }
                RequestParseError::Unsendable { err } => {
                    ::log::debug!("Couldn't parse request from {:?}: {}", addr, err);
                }
            }

            addr
        }
        Err(self::recv_helper::Error::InvalidSocketAddress) => {
            ::log::debug!("Ignored request claiming to be from port 0");

            return;
        }
        Err(self::recv_helper::Error::RecvMsgParseError) => {
            ::log::error!("RecvMsgOut::parse failed");

            return;
        }
    };

    if self.config.statistics.active() {
        let (statistics, extra_bytes) = if addr.is_ipv4() {
            (&self.shared_state.statistics_ipv4, EXTRA_PACKET_SIZE_IPV4)
        } else {
            (&self.shared_state.statistics_ipv6, EXTRA_PACKET_SIZE_IPV6)
        };

        statistics
            .bytes_received
            .fetch_add(buffer.len() + extra_bytes, Ordering::Relaxed);
        statistics.requests_received.fetch_add(1, Ordering::Relaxed);
    }
}
|
||||
|
||||
// Route a parsed request: connect requests are answered locally; announce
// and scrape requests are forwarded to swarm workers after connection id
// (and, for announce, access list) validation. Requests with invalid
// connection ids are silently dropped.
fn handle_request(&mut self, request: Request, src: CanonicalSocketAddr) {
    let access_list_mode = self.config.access_list.mode;

    match request {
        Request::Connect(request) => {
            let connection_id = self.validator.create_connection_id(src);

            let response = Response::Connect(ConnectResponse {
                connection_id,
                transaction_id: request.transaction_id,
            });

            self.local_responses.push_back((response, src));
        }
        Request::Announce(request) => {
            if self
                .validator
                .connection_id_valid(src, request.connection_id)
            {
                if self
                    .access_list_cache
                    .load()
                    .allows(access_list_mode, &request.info_hash.0)
                {
                    // Swarm worker is chosen deterministically by info hash
                    // so all peers of a torrent land on the same worker.
                    let worker_index =
                        SwarmWorkerIndex::from_info_hash(&self.config, request.info_hash);

                    self.request_sender.try_send_to(
                        worker_index,
                        ConnectedRequest::Announce(request),
                        src,
                    );
                } else {
                    let response = Response::Error(ErrorResponse {
                        transaction_id: request.transaction_id,
                        message: "Info hash not allowed".into(),
                    });

                    self.local_responses.push_back((response, src))
                }
            }
        }
        Request::Scrape(request) => {
            if self
                .validator
                .connection_id_valid(src, request.connection_id)
            {
                // A scrape may span several swarm workers; split it and
                // reassemble the parts later in get_next_swarm_response.
                let split_requests = self.pending_scrape_responses.prepare_split_requests(
                    &self.config,
                    request,
                    self.pending_scrape_valid_until,
                );

                for (swarm_worker_index, request) in split_requests {
                    self.request_sender.try_send_to(
                        swarm_worker_index,
                        ConnectedRequest::Scrape(request),
                        src,
                    );
                }
            }
        }
    }
}
|
||||
|
||||
fn get_next_swarm_response(&mut self) -> Option<(Response, CanonicalSocketAddr)> {
|
||||
loop {
|
||||
match self.response_receiver.try_recv() {
|
||||
Ok((ConnectedResponse::AnnounceIpv4(response), addr)) => {
|
||||
return Some((Response::AnnounceIpv4(response), addr));
|
||||
}
|
||||
Ok((ConnectedResponse::AnnounceIpv6(response), addr)) => {
|
||||
return Some((Response::AnnounceIpv6(response), addr));
|
||||
}
|
||||
Ok((ConnectedResponse::Scrape(response), addr)) => {
|
||||
if let Some(response) =
|
||||
self.pending_scrape_responses.add_and_get_finished(response)
|
||||
{
|
||||
return Some((Response::Scrape(response), addr));
|
||||
}
|
||||
}
|
||||
Err(_) => {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn supported_on_current_kernel() -> anyhow::Result<()> {
|
||||
let opcodes = [
|
||||
// We can't probe for RecvMsgMulti, so we probe for SendZc, which was
|
||||
// also introduced in Linux 6.0
|
||||
io_uring::opcode::SendZc::CODE,
|
||||
];
|
||||
|
||||
let ring = IoUring::new(1).with_context(|| "create ring")?;
|
||||
|
||||
let mut probe = Probe::new();
|
||||
|
||||
ring.submitter()
|
||||
.register_probe(&mut probe)
|
||||
.with_context(|| "register probe")?;
|
||||
|
||||
for opcode in opcodes {
|
||||
if !probe.is_supported(opcode) {
|
||||
return Err(anyhow::anyhow!(
|
||||
"io_uring opcode {:b} not supported",
|
||||
opcode
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
145
crates/udp/src/workers/socket/uring/recv_helper.rs
Normal file
145
crates/udp/src/workers/socket/uring/recv_helper.rs
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
use std::{
|
||||
cell::UnsafeCell,
|
||||
net::{Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
|
||||
ptr::null_mut,
|
||||
};
|
||||
|
||||
use aquatic_common::CanonicalSocketAddr;
|
||||
use aquatic_udp_protocol::{Request, RequestParseError};
|
||||
use io_uring::{opcode::RecvMsgMulti, types::RecvMsgOut};
|
||||
|
||||
use crate::config::Config;
|
||||
|
||||
use super::{SOCKET_IDENTIFIER, USER_DATA_RECV};
|
||||
|
||||
/// Errors that can occur when parsing a received datagram
pub enum Error {
    /// The kernel-provided recvmsg output could not be parsed
    RecvMsgParseError,
    /// The payload was not a valid tracker request; sender address included
    /// so an error response can be sent back
    RequestParseError(RequestParseError, CanonicalSocketAddr),
    /// The sender address was unusable (port zero)
    InvalidSocketAddress,
}
|
||||
|
||||
/// Helper for creating RecvMsgMulti io_uring entries and for parsing the
/// kernel-filled buffers into tracker requests
pub struct RecvHelper {
    // Decides whether the v4 or v6 msghdr is used
    socket_is_ipv4: bool,
    // Passed to request parsing to bound scrape request size
    max_scrape_torrents: u8,
    // Never read directly, but must be kept alive: msghdr_v4 stores a raw
    // pointer into it
    #[allow(dead_code)]
    name_v4: Box<UnsafeCell<libc::sockaddr_in>>,
    msghdr_v4: Box<UnsafeCell<libc::msghdr>>,
    // Never read directly, but must be kept alive: msghdr_v6 stores a raw
    // pointer into it
    #[allow(dead_code)]
    name_v6: Box<UnsafeCell<libc::sockaddr_in6>>,
    msghdr_v6: Box<UnsafeCell<libc::msghdr>>,
}
|
||||
|
||||
impl RecvHelper {
    /// Set up zeroed sockaddr storage for both address families and msghdrs
    /// pointing at it. Boxing gives the name structs stable addresses that
    /// the msghdrs can safely point to.
    pub fn new(config: &Config) -> Self {
        let name_v4 = Box::new(UnsafeCell::new(libc::sockaddr_in {
            sin_family: 0,
            sin_port: 0,
            sin_addr: libc::in_addr { s_addr: 0 },
            sin_zero: [0; 8],
        }));

        let msghdr_v4 = Box::new(UnsafeCell::new(libc::msghdr {
            msg_name: name_v4.get() as *mut libc::c_void,
            msg_namelen: core::mem::size_of::<libc::sockaddr_in>() as u32,
            msg_iov: null_mut(),
            msg_iovlen: 0,
            msg_control: null_mut(),
            msg_controllen: 0,
            msg_flags: 0,
        }));

        let name_v6 = Box::new(UnsafeCell::new(libc::sockaddr_in6 {
            sin6_family: 0,
            sin6_port: 0,
            sin6_flowinfo: 0,
            sin6_addr: libc::in6_addr { s6_addr: [0; 16] },
            sin6_scope_id: 0,
        }));

        let msghdr_v6 = Box::new(UnsafeCell::new(libc::msghdr {
            msg_name: name_v6.get() as *mut libc::c_void,
            msg_namelen: core::mem::size_of::<libc::sockaddr_in6>() as u32,
            msg_iov: null_mut(),
            msg_iovlen: 0,
            msg_control: null_mut(),
            msg_controllen: 0,
            msg_flags: 0,
        }));

        Self {
            socket_is_ipv4: config.network.address.is_ipv4(),
            max_scrape_torrents: config.protocol.max_scrape_torrents,
            name_v4,
            msghdr_v4,
            name_v6,
            msghdr_v6,
        }
    }

    /// Build a multishot recvmsg submission entry using the msghdr matching
    /// the socket's address family and the given provided-buffer group
    pub fn create_entry(&self, buf_group: u16) -> io_uring::squeue::Entry {
        let msghdr: *const libc::msghdr = if self.socket_is_ipv4 {
            self.msghdr_v4.get()
        } else {
            self.msghdr_v6.get()
        };

        RecvMsgMulti::new(SOCKET_IDENTIFIER, msghdr, buf_group)
            .build()
            .user_data(USER_DATA_RECV)
    }

    /// Parse a kernel-filled recvmsg buffer into a request and the sender's
    /// canonical socket address.
    ///
    /// Addresses with port zero are rejected, since no response could be
    /// sent to them.
    pub fn parse(&self, buffer: &[u8]) -> Result<(Request, CanonicalSocketAddr), Error> {
        let (msg, addr) = if self.socket_is_ipv4 {
            // Layout of `buffer` is defined by the msghdr the entry was
            // created with
            let msg = unsafe {
                let msghdr = &*(self.msghdr_v4.get() as *const _);

                RecvMsgOut::parse(buffer, msghdr).map_err(|_| Error::RecvMsgParseError)?
            };

            let addr = unsafe {
                let name_data = *(msg.name_data().as_ptr() as *const libc::sockaddr_in);

                // sockaddr fields are big-endian (network byte order)
                SocketAddr::V4(SocketAddrV4::new(
                    u32::from_be(name_data.sin_addr.s_addr).into(),
                    u16::from_be(name_data.sin_port),
                ))
            };

            if addr.port() == 0 {
                return Err(Error::InvalidSocketAddress);
            }

            (msg, addr)
        } else {
            let msg = unsafe {
                let msghdr = &*(self.msghdr_v6.get() as *const _);

                RecvMsgOut::parse(buffer, msghdr).map_err(|_| Error::RecvMsgParseError)?
            };

            let addr = unsafe {
                let name_data = *(msg.name_data().as_ptr() as *const libc::sockaddr_in6);

                SocketAddr::V6(SocketAddrV6::new(
                    Ipv6Addr::from(name_data.sin6_addr.s6_addr),
                    u16::from_be(name_data.sin6_port),
                    u32::from_be(name_data.sin6_flowinfo),
                    u32::from_be(name_data.sin6_scope_id),
                ))
            };

            if addr.port() == 0 {
                return Err(Error::InvalidSocketAddress);
            }

            (msg, addr)
        };

        let addr = CanonicalSocketAddr::new(addr);

        let request = Request::from_bytes(msg.payload_data(), self.max_scrape_torrents)
            .map_err(|err| Error::RequestParseError(err, addr))?;

        Ok((request, addr))
    }
}
|
||||
251
crates/udp/src/workers/socket/uring/send_buffers.rs
Normal file
251
crates/udp/src/workers/socket/uring/send_buffers.rs
Normal file
|
|
@ -0,0 +1,251 @@
|
|||
use std::{cell::UnsafeCell, io::Cursor, net::SocketAddr, ops::IndexMut, ptr::null_mut};
|
||||
|
||||
use aquatic_common::CanonicalSocketAddr;
|
||||
use aquatic_udp_protocol::Response;
|
||||
use io_uring::opcode::SendMsg;
|
||||
|
||||
use crate::config::Config;
|
||||
|
||||
use super::{RESPONSE_BUF_LEN, SOCKET_IDENTIFIER};
|
||||
|
||||
/// Errors that can occur when preparing a response send entry
pub enum Error {
    /// Every send buffer is currently referenced by an in-flight entry
    NoBuffers,
    /// The response could not be serialized into the buffer
    SerializationFailed(std::io::Error),
}
|
||||
|
||||
/// Category of a sent response; only used for statistics
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ResponseType {
    Connect,
    Announce,
    Scrape,
    Error,
}
|
||||
|
||||
impl ResponseType {
|
||||
fn from_response(response: &Response) -> Self {
|
||||
match response {
|
||||
Response::Connect(_) => Self::Connect,
|
||||
Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => Self::Announce,
|
||||
Response::Scrape(_) => Self::Scrape,
|
||||
Response::Error(_) => Self::Error,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// One reusable outgoing-response buffer plus all the sockaddr/iovec/msghdr
/// structures a SendMsg entry points into. Must stay at a fixed memory
/// location once pointers have been set up.
struct SendBuffer {
    // Receiver address storage (IPv4 sockets)
    name_v4: UnsafeCell<libc::sockaddr_in>,
    // Receiver address storage (IPv6 sockets)
    name_v6: UnsafeCell<libc::sockaddr_in6>,
    // Serialized response payload
    bytes: UnsafeCell<[u8; RESPONSE_BUF_LEN]>,
    iovec: UnsafeCell<libc::iovec>,
    msghdr: UnsafeCell<libc::msghdr>,
    // Whether this buffer may be reused (no in-flight entry references it)
    free: bool,
    /// Only used for statistics
    receiver_is_ipv4: bool,
    /// Only used for statistics
    response_type: ResponseType,
}
|
||||
|
||||
impl SendBuffer {
    /// Create a buffer with zeroed address storage and null iovec/msghdr
    /// pointers. `setup_pointers` must be called once the buffer has reached
    /// its final memory location.
    fn new_with_null_pointers() -> Self {
        Self {
            name_v4: UnsafeCell::new(libc::sockaddr_in {
                sin_family: libc::AF_INET as u16,
                sin_port: 0,
                sin_addr: libc::in_addr { s_addr: 0 },
                sin_zero: [0; 8],
            }),
            name_v6: UnsafeCell::new(libc::sockaddr_in6 {
                sin6_family: libc::AF_INET6 as u16,
                sin6_port: 0,
                sin6_flowinfo: 0,
                sin6_addr: libc::in6_addr { s6_addr: [0; 16] },
                sin6_scope_id: 0,
            }),
            bytes: UnsafeCell::new([0; RESPONSE_BUF_LEN]),
            iovec: UnsafeCell::new(libc::iovec {
                iov_base: null_mut(),
                iov_len: 0,
            }),
            msghdr: UnsafeCell::new(libc::msghdr {
                msg_name: null_mut(),
                msg_namelen: 0,
                msg_iov: null_mut(),
                msg_iovlen: 1,
                msg_control: null_mut(),
                msg_controllen: 0,
                msg_flags: 0,
            }),
            free: true,
            receiver_is_ipv4: true,
            response_type: ResponseType::Connect,
        }
    }

    /// Point the iovec at the payload bytes and the msghdr at the iovec and
    /// at the name struct matching the socket's address family
    fn setup_pointers(&mut self, socket_is_ipv4: bool) {
        unsafe {
            let iovec = &mut *self.iovec.get();

            iovec.iov_base = self.bytes.get() as *mut libc::c_void;
            iovec.iov_len = (&*self.bytes.get()).len();

            let msghdr = &mut *self.msghdr.get();

            msghdr.msg_iov = self.iovec.get();

            if socket_is_ipv4 {
                msghdr.msg_name = self.name_v4.get() as *mut libc::c_void;
                msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in>() as u32;
            } else {
                msghdr.msg_name = self.name_v6.get() as *mut libc::c_void;
                msghdr.msg_namelen = core::mem::size_of::<libc::sockaddr_in6>() as u32;
            }
        }
    }

    /// Serialize `response` into this buffer, set the receiver address, and
    /// build a SendMsg submission entry referencing this buffer.
    ///
    /// # Safety
    ///
    /// - SendBuffer must be stored at a fixed location in memory
    /// - SendBuffer.setup_pointers must have been called while stored at that
    ///   fixed location
    /// - Contents of struct fields wrapped in UnsafeCell can NOT be accessed
    ///   simultaneously to this function call
    unsafe fn prepare_entry(
        &mut self,
        response: &Response,
        addr: CanonicalSocketAddr,
        socket_is_ipv4: bool,
    ) -> Result<io_uring::squeue::Entry, Error> {
        // Set receiver socket addr
        if socket_is_ipv4 {
            self.receiver_is_ipv4 = true;

            let addr = if let Some(SocketAddr::V4(addr)) = addr.get_ipv4() {
                addr
            } else {
                panic!("ipv6 address in ipv4 mode");
            };

            let name = &mut *self.name_v4.get();

            // sockaddr fields are big-endian (network byte order)
            name.sin_port = addr.port().to_be();
            name.sin_addr.s_addr = u32::from(*addr.ip()).to_be();
        } else {
            // Set receiver protocol type before calling addr.get_ipv6_mapped()
            self.receiver_is_ipv4 = addr.is_ipv4();

            let addr = if let SocketAddr::V6(addr) = addr.get_ipv6_mapped() {
                addr
            } else {
                panic!("ipv4 address when ipv6 or ipv6-mapped address expected");
            };

            let name = &mut *self.name_v6.get();

            name.sin6_port = addr.port().to_be();
            name.sin6_addr.s6_addr = addr.ip().octets();
        }

        let bytes = (&mut *self.bytes.get()).as_mut_slice();

        let mut cursor = Cursor::new(bytes);

        match response.write(&mut cursor) {
            Ok(()) => {
                // Send only the bytes actually written by serialization
                (&mut *self.iovec.get()).iov_len = cursor.position() as usize;

                self.response_type = ResponseType::from_response(response);
                self.free = false;

                Ok(SendMsg::new(SOCKET_IDENTIFIER, self.msghdr.get()).build())
            }
            Err(err) => Err(Error::SerializationFailed(err)),
        }
    }
}
|
||||
|
||||
/// Pool of fixed-location send buffers for outgoing responses
pub struct SendBuffers {
    // Where the linear search for a free buffer starts; advanced on
    // successful allocation, reset after completion queue processing
    likely_next_free_index: usize,
    socket_is_ipv4: bool,
    // Boxed slice gives the buffers stable addresses, which the raw
    // pointers set up inside each SendBuffer rely on
    buffers: Box<[SendBuffer]>,
}
|
||||
|
||||
impl SendBuffers {
    /// Allocate `capacity` buffers and set up their internal raw pointers
    pub fn new(config: &Config, capacity: usize) -> Self {
        let socket_is_ipv4 = config.network.address.is_ipv4();

        let mut buffers = ::std::iter::repeat_with(|| SendBuffer::new_with_null_pointers())
            .take(capacity)
            .collect::<Vec<_>>()
            .into_boxed_slice();

        // Pointers can only be set up after the buffers have reached their
        // final location inside the boxed slice
        for buffer in buffers.iter_mut() {
            buffer.setup_pointers(socket_is_ipv4);
        }

        Self {
            likely_next_free_index: 0,
            socket_is_ipv4,
            buffers,
        }
    }

    /// Response type and receiver protocol of the buffer at `index`, used
    /// for statistics when its completion entry arrives
    pub fn response_type_and_ipv4(&self, index: usize) -> (ResponseType, bool) {
        let buffer = self.buffers.get(index).unwrap();

        (buffer.response_type, buffer.receiver_is_ipv4)
    }

    /// # Safety
    ///
    /// Only safe to call once buffer is no longer referenced by in-flight
    /// io_uring queue entries
    pub unsafe fn mark_buffer_as_free(&mut self, index: usize) {
        self.buffers[index].free = true;
    }

    /// Call after going through completion queue
    pub fn reset_likely_next_free_index(&mut self) {
        self.likely_next_free_index = 0;
    }

    /// Serialize `response` into a free buffer and return a submission entry
    /// whose user data is the buffer's index
    pub fn prepare_entry(
        &mut self,
        response: &Response,
        addr: CanonicalSocketAddr,
    ) -> Result<io_uring::squeue::Entry, Error> {
        let index = self.next_free_index()?;

        let buffer = self.buffers.index_mut(index);

        // Safety: OK because buffers are stored in fixed memory location,
        // buffer pointers were set up in SendBuffers::new() and pointers to
        // SendBuffer UnsafeCell contents are not accessed elsewhere
        unsafe {
            match buffer.prepare_entry(response, addr, self.socket_is_ipv4) {
                Ok(entry) => {
                    self.likely_next_free_index = index + 1;

                    Ok(entry.user_data(index as u64))
                }
                Err(err) => Err(err),
            }
        }
    }

    /// Index of the first free buffer at or after `likely_next_free_index`.
    /// Buffers freed at lower indices are only reconsidered after
    /// `reset_likely_next_free_index` has been called.
    fn next_free_index(&self) -> Result<usize, Error> {
        if self.likely_next_free_index >= self.buffers.len() {
            return Err(Error::NoBuffers);
        }

        for (i, buffer) in self.buffers[self.likely_next_free_index..]
            .iter()
            .enumerate()
        {
            if buffer.free {
                return Ok(self.likely_next_free_index + i);
            }
        }

        Err(Error::NoBuffers)
    }
}
|
||||
143
crates/udp/src/workers/socket/validator.rs
Normal file
143
crates/udp/src/workers/socket/validator.rs
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
use std::net::IpAddr;
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::Context;
|
||||
use constant_time_eq::constant_time_eq;
|
||||
use getrandom::getrandom;
|
||||
|
||||
use aquatic_common::CanonicalSocketAddr;
|
||||
use aquatic_udp_protocol::ConnectionId;
|
||||
|
||||
use crate::config::Config;
|
||||
|
||||
/// HMAC (BLAKE3) based ConnectionID creator and validator
///
/// Structure of created ConnectionID (bytes making up inner i64):
/// - &[0..4]: connection expiration time as number of seconds after
///   ConnectionValidator instance was created, encoded as u32 bytes.
///   Value fits around 136 years.
/// - &[4..8]: truncated keyed BLAKE3 hash of above 4 bytes and octets of
///   client IP address
///
/// The purpose of using ConnectionIDs is to prevent IP spoofing, mainly to
/// prevent the tracker from being used as an amplification vector for DDoS
/// attacks. By including 32 bits of BLAKE3 keyed hash output in its contents,
/// such abuse should be rendered impractical.
#[derive(Clone)]
pub struct ConnectionValidator {
    // Reference point for the expiration timestamps stored in ids
    start_time: Instant,
    // Seconds an id stays valid after creation
    max_connection_age: u32,
    // Initialized once with a random key; mutated and reset per hash call
    keyed_hasher: blake3::Hasher,
}
|
||||
|
||||
impl ConnectionValidator {
    /// Create new instance. Must be created once and cloned if used in several
    /// threads.
    pub fn new(config: &Config) -> anyhow::Result<Self> {
        let mut key = [0; 32];

        getrandom(&mut key)
            .with_context(|| "Couldn't get random bytes for ConnectionValidator key")?;

        let keyed_hasher = blake3::Hasher::new_keyed(&key);

        Ok(Self {
            keyed_hasher,
            start_time: Instant::now(),
            max_connection_age: config.cleaning.max_connection_age,
        })
    }

    /// Create a connection id for `source_addr`, expiring
    /// `max_connection_age` seconds from now
    pub fn create_connection_id(&mut self, source_addr: CanonicalSocketAddr) -> ConnectionId {
        // Expiration time in seconds since `start_time`; native-endian is
        // fine since ids are only created and validated by this tracker
        let valid_until =
            (self.start_time.elapsed().as_secs() as u32 + self.max_connection_age).to_ne_bytes();

        let hash = self.hash(valid_until, source_addr.get().ip());

        let mut connection_id_bytes = [0u8; 8];

        (&mut connection_id_bytes[..4]).copy_from_slice(&valid_until);
        (&mut connection_id_bytes[4..]).copy_from_slice(&hash);

        ConnectionId(i64::from_ne_bytes(connection_id_bytes))
    }

    /// Check that the id's hash matches its expiration bytes plus the source
    /// address, and that the id has not yet expired
    pub fn connection_id_valid(
        &mut self,
        source_addr: CanonicalSocketAddr,
        connection_id: ConnectionId,
    ) -> bool {
        let bytes = connection_id.0.to_ne_bytes();
        let (valid_until, hash) = bytes.split_at(4);
        let valid_until: [u8; 4] = valid_until.try_into().unwrap();

        // Constant-time comparison to avoid leaking hash bytes via timing
        if !constant_time_eq(hash, &self.hash(valid_until, source_addr.get().ip())) {
            return false;
        }

        u32::from_ne_bytes(valid_until) > self.start_time.elapsed().as_secs() as u32
    }

    /// Keyed BLAKE3 hash of the expiration bytes and IP address octets,
    /// truncated to four bytes
    fn hash(&mut self, valid_until: [u8; 4], ip_addr: IpAddr) -> [u8; 4] {
        self.keyed_hasher.update(&valid_until);

        match ip_addr {
            IpAddr::V4(ip) => self.keyed_hasher.update(&ip.octets()),
            IpAddr::V6(ip) => self.keyed_hasher.update(&ip.octets()),
        };

        let mut hash = [0u8; 4];

        self.keyed_hasher.finalize_xof().fill(&mut hash);
        // Restore the keyed initial state for the next call
        self.keyed_hasher.reset();

        hash
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::net::SocketAddr;

    use quickcheck_macros::quickcheck;

    use super::*;

    // Property test: an id must never validate for a different address, must
    // validate for its own address while unexpired, and must be invalid
    // immediately when max_connection_age is zero
    #[quickcheck]
    fn test_connection_validator(
        original_addr: IpAddr,
        different_addr: IpAddr,
        max_connection_age: u32,
    ) -> quickcheck::TestResult {
        let original_addr = CanonicalSocketAddr::new(SocketAddr::new(original_addr, 0));
        let different_addr = CanonicalSocketAddr::new(SocketAddr::new(different_addr, 0));

        if original_addr == different_addr {
            return quickcheck::TestResult::discard();
        }

        let mut validator = {
            let mut config = Config::default();

            config.cleaning.max_connection_age = max_connection_age;

            ConnectionValidator::new(&config).unwrap()
        };

        let connection_id = validator.create_connection_id(original_addr);

        let original_valid = validator.connection_id_valid(original_addr, connection_id);
        let different_valid = validator.connection_id_valid(different_addr, connection_id);

        if different_valid {
            return quickcheck::TestResult::failed();
        }

        if max_connection_age == 0 {
            quickcheck::TestResult::from_bool(!original_valid)
        } else {
            // Note: depends on that running this test takes less than a second
            quickcheck::TestResult::from_bool(original_valid)
        }
    }
}
|
||||
318
crates/udp/src/workers/statistics/collector.rs
Normal file
318
crates/udp/src/workers/statistics/collector.rs
Normal file
|
|
@ -0,0 +1,318 @@
|
|||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use hdrhistogram::Histogram;
|
||||
use num_format::{Locale, ToFormattedString};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::common::Statistics;
|
||||
use crate::config::Config;
|
||||
|
||||
/// Collects statistics for one IP version from shared atomic counters and
/// per-worker peer histograms
pub struct StatisticsCollector {
    // Counters incremented by the other workers
    shared: Arc<Statistics>,
    // Time of previous collection, used to compute per-second rates
    last_update: Instant,
    // Histograms received from swarm workers but not yet merged
    pending_histograms: Vec<Histogram<u64>>,
    // Result of the latest complete merge of all workers' histograms
    last_complete_histogram: PeerHistogramStatistics,
    // "4" or "6", used as Prometheus label value
    #[cfg(feature = "prometheus")]
    ip_version: String,
}
|
||||
|
||||
impl StatisticsCollector {
|
||||
pub fn new(shared: Arc<Statistics>, #[cfg(feature = "prometheus")] ip_version: String) -> Self {
|
||||
Self {
|
||||
shared,
|
||||
last_update: Instant::now(),
|
||||
pending_histograms: Vec::new(),
|
||||
last_complete_histogram: Default::default(),
|
||||
#[cfg(feature = "prometheus")]
|
||||
ip_version,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_histogram(&mut self, config: &Config, histogram: Histogram<u64>) {
|
||||
self.pending_histograms.push(histogram);
|
||||
|
||||
if self.pending_histograms.len() == config.swarm_workers {
|
||||
self.last_complete_histogram =
|
||||
PeerHistogramStatistics::new(self.pending_histograms.drain(..).sum());
|
||||
}
|
||||
}
|
||||
|
||||
pub fn collect_from_shared(
|
||||
&mut self,
|
||||
#[cfg(feature = "prometheus")] config: &Config,
|
||||
) -> CollectedStatistics {
|
||||
let requests_received = Self::fetch_and_reset(&self.shared.requests_received);
|
||||
let responses_sent_connect = Self::fetch_and_reset(&self.shared.responses_sent_connect);
|
||||
let responses_sent_announce = Self::fetch_and_reset(&self.shared.responses_sent_announce);
|
||||
let responses_sent_scrape = Self::fetch_and_reset(&self.shared.responses_sent_scrape);
|
||||
let responses_sent_error = Self::fetch_and_reset(&self.shared.responses_sent_error);
|
||||
|
||||
let bytes_received = Self::fetch_and_reset(&self.shared.bytes_received);
|
||||
let bytes_sent = Self::fetch_and_reset(&self.shared.bytes_sent);
|
||||
|
||||
let num_torrents_by_worker: Vec<usize> = self
|
||||
.shared
|
||||
.torrents
|
||||
.iter()
|
||||
.map(|n| n.load(Ordering::Relaxed))
|
||||
.collect();
|
||||
let num_peers_by_worker: Vec<usize> = self
|
||||
.shared
|
||||
.peers
|
||||
.iter()
|
||||
.map(|n| n.load(Ordering::Relaxed))
|
||||
.collect();
|
||||
|
||||
let elapsed = {
|
||||
let now = Instant::now();
|
||||
|
||||
let elapsed = (now - self.last_update).as_secs_f64();
|
||||
|
||||
self.last_update = now;
|
||||
|
||||
elapsed
|
||||
};
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint {
|
||||
::metrics::counter!(
|
||||
"aquatic_requests_total",
|
||||
requests_received.try_into().unwrap(),
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
);
|
||||
::metrics::counter!(
|
||||
"aquatic_responses_total",
|
||||
responses_sent_connect.try_into().unwrap(),
|
||||
"type" => "connect",
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
);
|
||||
::metrics::counter!(
|
||||
"aquatic_responses_total",
|
||||
responses_sent_announce.try_into().unwrap(),
|
||||
"type" => "announce",
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
);
|
||||
::metrics::counter!(
|
||||
"aquatic_responses_total",
|
||||
responses_sent_scrape.try_into().unwrap(),
|
||||
"type" => "scrape",
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
);
|
||||
::metrics::counter!(
|
||||
"aquatic_responses_total",
|
||||
responses_sent_error.try_into().unwrap(),
|
||||
"type" => "error",
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
);
|
||||
::metrics::counter!(
|
||||
"aquatic_rx_bytes",
|
||||
bytes_received.try_into().unwrap(),
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
);
|
||||
::metrics::counter!(
|
||||
"aquatic_tx_bytes",
|
||||
bytes_sent.try_into().unwrap(),
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
);
|
||||
|
||||
for (worker_index, n) in num_torrents_by_worker.iter().copied().enumerate() {
|
||||
::metrics::gauge!(
|
||||
"aquatic_torrents",
|
||||
n as f64,
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
"worker_index" => worker_index.to_string(),
|
||||
);
|
||||
}
|
||||
for (worker_index, n) in num_peers_by_worker.iter().copied().enumerate() {
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers",
|
||||
n as f64,
|
||||
"ip_version" => self.ip_version.clone(),
|
||||
"worker_index" => worker_index.to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
if config.statistics.torrent_peer_histograms {
|
||||
self.last_complete_histogram
|
||||
.update_metrics(self.ip_version.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let num_peers: usize = num_peers_by_worker.into_iter().sum();
|
||||
let num_torrents: usize = num_torrents_by_worker.into_iter().sum();
|
||||
|
||||
let requests_per_second = requests_received as f64 / elapsed;
|
||||
let responses_per_second_connect = responses_sent_connect as f64 / elapsed;
|
||||
let responses_per_second_announce = responses_sent_announce as f64 / elapsed;
|
||||
let responses_per_second_scrape = responses_sent_scrape as f64 / elapsed;
|
||||
let responses_per_second_error = responses_sent_error as f64 / elapsed;
|
||||
let bytes_received_per_second = bytes_received as f64 / elapsed;
|
||||
let bytes_sent_per_second = bytes_sent as f64 / elapsed;
|
||||
|
||||
let responses_per_second_total = responses_per_second_connect
|
||||
+ responses_per_second_announce
|
||||
+ responses_per_second_scrape
|
||||
+ responses_per_second_error;
|
||||
|
||||
CollectedStatistics {
|
||||
requests_per_second: (requests_per_second as usize).to_formatted_string(&Locale::en),
|
||||
responses_per_second_total: (responses_per_second_total as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
responses_per_second_connect: (responses_per_second_connect as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
responses_per_second_announce: (responses_per_second_announce as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
responses_per_second_scrape: (responses_per_second_scrape as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
responses_per_second_error: (responses_per_second_error as usize)
|
||||
.to_formatted_string(&Locale::en),
|
||||
rx_mbits: format!("{:.2}", bytes_received_per_second * 8.0 / 1_000_000.0),
|
||||
tx_mbits: format!("{:.2}", bytes_sent_per_second * 8.0 / 1_000_000.0),
|
||||
num_torrents: num_torrents.to_formatted_string(&Locale::en),
|
||||
num_peers: num_peers.to_formatted_string(&Locale::en),
|
||||
peer_histogram: self.last_complete_histogram.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
fn fetch_and_reset(atomic: &AtomicUsize) -> usize {
|
||||
atomic.fetch_and(0, Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
/// One interval's statistics, preformatted as strings for display (stdout
/// printing and the HTML template)
#[derive(Clone, Debug, Serialize)]
pub struct CollectedStatistics {
    pub requests_per_second: String,
    pub responses_per_second_total: String,
    pub responses_per_second_connect: String,
    pub responses_per_second_announce: String,
    pub responses_per_second_scrape: String,
    pub responses_per_second_error: String,
    // Receive / transmit rates in megabits per second
    pub rx_mbits: String,
    pub tx_mbits: String,
    pub num_torrents: String,
    pub num_peers: String,
    pub peer_histogram: PeerHistogramStatistics,
}
|
||||
|
||||
/// Extremes and percentiles of the peers-per-torrent distribution, extracted
/// from a merged histogram
#[derive(Clone, Debug, Serialize, Default)]
pub struct PeerHistogramStatistics {
    pub min: u64,
    pub p10: u64,
    pub p20: u64,
    pub p30: u64,
    pub p40: u64,
    pub p50: u64,
    pub p60: u64,
    pub p70: u64,
    pub p80: u64,
    pub p90: u64,
    pub p95: u64,
    pub p99: u64,
    // 99.9th percentile
    pub p999: u64,
    pub max: u64,
}
|
||||
|
||||
impl PeerHistogramStatistics {
|
||||
fn new(h: Histogram<u64>) -> Self {
|
||||
Self {
|
||||
min: h.min(),
|
||||
p10: h.value_at_percentile(10.0),
|
||||
p20: h.value_at_percentile(20.0),
|
||||
p30: h.value_at_percentile(30.0),
|
||||
p40: h.value_at_percentile(40.0),
|
||||
p50: h.value_at_percentile(50.0),
|
||||
p60: h.value_at_percentile(60.0),
|
||||
p70: h.value_at_percentile(70.0),
|
||||
p80: h.value_at_percentile(80.0),
|
||||
p90: h.value_at_percentile(90.0),
|
||||
p95: h.value_at_percentile(95.0),
|
||||
p99: h.value_at_percentile(99.0),
|
||||
p999: h.value_at_percentile(99.9),
|
||||
max: h.max(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
fn update_metrics(&self, ip_version: String) {
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.min as f64,
|
||||
"type" => "max",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p10 as f64,
|
||||
"type" => "p10",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p20 as f64,
|
||||
"type" => "p20",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p30 as f64,
|
||||
"type" => "p30",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p40 as f64,
|
||||
"type" => "p40",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p50 as f64,
|
||||
"type" => "p50",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p60 as f64,
|
||||
"type" => "p60",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p70 as f64,
|
||||
"type" => "p70",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p80 as f64,
|
||||
"type" => "p80",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p90 as f64,
|
||||
"type" => "p90",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p99 as f64,
|
||||
"type" => "p99",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.p999 as f64,
|
||||
"type" => "p99.9",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
::metrics::gauge!(
|
||||
"aquatic_peers_per_torrent",
|
||||
self.max as f64,
|
||||
"type" => "max",
|
||||
"ip_version" => ip_version.clone(),
|
||||
);
|
||||
}
|
||||
}
|
||||
306
crates/udp/src/workers/statistics/mod.rs
Normal file
306
crates/udp/src/workers/statistics/mod.rs
Normal file
|
|
@ -0,0 +1,306 @@
|
|||
mod collector;
|
||||
|
||||
use std::fs::File;
|
||||
use std::io::Write;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_common::{IndexMap, PanicSentinel};
|
||||
use aquatic_udp_protocol::{PeerClient, PeerId};
|
||||
use compact_str::CompactString;
|
||||
use crossbeam_channel::Receiver;
|
||||
use num_format::{Locale, ToFormattedString};
|
||||
use serde::Serialize;
|
||||
use time::format_description::well_known::Rfc2822;
|
||||
use time::OffsetDateTime;
|
||||
use tinytemplate::TinyTemplate;
|
||||
|
||||
use collector::{CollectedStatistics, StatisticsCollector};
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
const TEMPLATE_KEY: &str = "statistics";
|
||||
const TEMPLATE_CONTENTS: &str = include_str!("../../../templates/statistics.html");
|
||||
const STYLESHEET_CONTENTS: &str = concat!(
|
||||
"<style>",
|
||||
include_str!("../../../templates/statistics.css"),
|
||||
"</style>"
|
||||
);
|
||||
|
||||
/// Data rendered into the statistics HTML template
#[derive(Debug, Serialize)]
struct TemplateData {
    // Inline <style> block injected into the page
    stylesheet: String,
    ipv4_active: bool,
    ipv6_active: bool,
    // NOTE(review): presumably toggles the extended (peer client) section
    // of the template — confirm against templates/statistics.html
    extended_active: bool,
    ipv4: CollectedStatistics,
    ipv6: CollectedStatistics,
    // Formatted timestamp of this collection (file imports Rfc2822 —
    // presumably that format; confirm at the assignment site)
    last_updated: String,
    peer_update_interval: String,
    // (client name, formatted peer count) pairs
    peer_clients: Vec<(String, String)>,
}
|
||||
|
||||
pub fn run_statistics_worker(
|
||||
_sentinel: PanicSentinel,
|
||||
config: Config,
|
||||
shared_state: State,
|
||||
statistics_receiver: Receiver<StatisticsMessage>,
|
||||
) {
|
||||
let process_peer_client_data = {
|
||||
let mut collect = config.statistics.write_html_to_file;
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
{
|
||||
collect |= config.statistics.run_prometheus_endpoint;
|
||||
}
|
||||
|
||||
collect & config.statistics.peer_clients
|
||||
};
|
||||
|
||||
let opt_tt = if config.statistics.write_html_to_file {
|
||||
let mut tt = TinyTemplate::new();
|
||||
|
||||
if let Err(err) = tt.add_template(TEMPLATE_KEY, TEMPLATE_CONTENTS) {
|
||||
::log::error!("Couldn't parse statistics html template: {:#}", err);
|
||||
|
||||
None
|
||||
} else {
|
||||
Some(tt)
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let mut ipv4_collector = StatisticsCollector::new(
|
||||
shared_state.statistics_ipv4,
|
||||
#[cfg(feature = "prometheus")]
|
||||
"4".into(),
|
||||
);
|
||||
let mut ipv6_collector = StatisticsCollector::new(
|
||||
shared_state.statistics_ipv6,
|
||||
#[cfg(feature = "prometheus")]
|
||||
"6".into(),
|
||||
);
|
||||
|
||||
// Store a count to enable not removing peers from the count completely
|
||||
// just because they were removed from one torrent
|
||||
let mut peers: IndexMap<PeerId, (usize, PeerClient, CompactString)> = IndexMap::default();
|
||||
|
||||
loop {
|
||||
let start_time = Instant::now();
|
||||
|
||||
for message in statistics_receiver.try_iter() {
|
||||
match message {
|
||||
StatisticsMessage::Ipv4PeerHistogram(h) => ipv4_collector.add_histogram(&config, h),
|
||||
StatisticsMessage::Ipv6PeerHistogram(h) => ipv6_collector.add_histogram(&config, h),
|
||||
StatisticsMessage::PeerAdded(peer_id) => {
|
||||
if process_peer_client_data {
|
||||
peers
|
||||
.entry(peer_id)
|
||||
.or_insert_with(|| (0, peer_id.client(), peer_id.first_8_bytes_hex()))
|
||||
.0 += 1;
|
||||
}
|
||||
}
|
||||
StatisticsMessage::PeerRemoved(peer_id) => {
|
||||
if process_peer_client_data {
|
||||
if let Some((count, _, _)) = peers.get_mut(&peer_id) {
|
||||
*count -= 1;
|
||||
|
||||
if *count == 0 {
|
||||
peers.remove(&peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let statistics_ipv4 = ipv4_collector.collect_from_shared(
|
||||
#[cfg(feature = "prometheus")]
|
||||
&config,
|
||||
);
|
||||
let statistics_ipv6 = ipv6_collector.collect_from_shared(
|
||||
#[cfg(feature = "prometheus")]
|
||||
&config,
|
||||
);
|
||||
|
||||
let peer_clients = if process_peer_client_data {
|
||||
let mut clients: IndexMap<PeerClient, usize> = IndexMap::default();
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
let mut prefixes: IndexMap<CompactString, usize> = IndexMap::default();
|
||||
|
||||
// Only count peer_ids once, even if they are in multiple torrents
|
||||
for (_, peer_client, prefix) in peers.values() {
|
||||
*clients.entry(peer_client.to_owned()).or_insert(0) += 1;
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint
|
||||
&& config.statistics.prometheus_peer_id_prefixes
|
||||
{
|
||||
*prefixes.entry(prefix.to_owned()).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
|
||||
clients.sort_unstable_by(|_, a, _, b| b.cmp(a));
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint
|
||||
&& config.statistics.prometheus_peer_id_prefixes
|
||||
{
|
||||
for (prefix, count) in prefixes {
|
||||
::metrics::gauge!(
|
||||
"aquatic_peer_id_prefixes",
|
||||
count as f64,
|
||||
"prefix_hex" => prefix.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let mut client_vec = Vec::with_capacity(clients.len());
|
||||
|
||||
for (client, count) in clients {
|
||||
if config.statistics.write_html_to_file {
|
||||
client_vec.push((client.to_string(), count.to_formatted_string(&Locale::en)));
|
||||
}
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.statistics.run_prometheus_endpoint {
|
||||
::metrics::gauge!(
|
||||
"aquatic_peer_clients",
|
||||
count as f64,
|
||||
"client" => client.to_string(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
client_vec
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
if config.statistics.print_to_stdout {
|
||||
println!("General:");
|
||||
println!(
|
||||
" access list entries: {}",
|
||||
shared_state.access_list.load().len()
|
||||
);
|
||||
|
||||
if config.network.ipv4_active() {
|
||||
println!("IPv4:");
|
||||
print_to_stdout(&config, &statistics_ipv4);
|
||||
}
|
||||
if config.network.ipv6_active() {
|
||||
println!("IPv6:");
|
||||
print_to_stdout(&config, &statistics_ipv6);
|
||||
}
|
||||
|
||||
println!();
|
||||
}
|
||||
|
||||
if let Some(tt) = opt_tt.as_ref() {
|
||||
let template_data = TemplateData {
|
||||
stylesheet: STYLESHEET_CONTENTS.to_string(),
|
||||
ipv4_active: config.network.ipv4_active(),
|
||||
ipv6_active: config.network.ipv6_active(),
|
||||
extended_active: config.statistics.torrent_peer_histograms,
|
||||
ipv4: statistics_ipv4,
|
||||
ipv6: statistics_ipv6,
|
||||
last_updated: OffsetDateTime::now_utc()
|
||||
.format(&Rfc2822)
|
||||
.unwrap_or("(formatting error)".into()),
|
||||
peer_update_interval: format!("{}", config.cleaning.torrent_cleaning_interval),
|
||||
peer_clients,
|
||||
};
|
||||
|
||||
if let Err(err) = save_html_to_file(&config, tt, &template_data) {
|
||||
::log::error!("Couldn't save statistics to file: {:#}", err)
|
||||
}
|
||||
}
|
||||
|
||||
peers.shrink_to_fit();
|
||||
|
||||
if let Some(time_remaining) =
|
||||
Duration::from_secs(config.statistics.interval).checked_sub(start_time.elapsed())
|
||||
{
|
||||
::std::thread::sleep(time_remaining);
|
||||
} else {
|
||||
::log::warn!(
|
||||
"statistics interval not long enough to process all data, output may be misleading"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn print_to_stdout(config: &Config, statistics: &CollectedStatistics) {
|
||||
println!(
|
||||
" bandwidth: {:>7} Mbit/s in, {:7} Mbit/s out",
|
||||
statistics.rx_mbits, statistics.tx_mbits,
|
||||
);
|
||||
println!(" requests/second: {:>10}", statistics.requests_per_second);
|
||||
println!(" responses/second");
|
||||
println!(
|
||||
" total: {:>10}",
|
||||
statistics.responses_per_second_total
|
||||
);
|
||||
println!(
|
||||
" connect: {:>10}",
|
||||
statistics.responses_per_second_connect
|
||||
);
|
||||
println!(
|
||||
" announce: {:>10}",
|
||||
statistics.responses_per_second_announce
|
||||
);
|
||||
println!(
|
||||
" scrape: {:>10}",
|
||||
statistics.responses_per_second_scrape
|
||||
);
|
||||
println!(
|
||||
" error: {:>10}",
|
||||
statistics.responses_per_second_error
|
||||
);
|
||||
println!(" torrents: {:>10}", statistics.num_torrents);
|
||||
println!(
|
||||
" peers: {:>10} (updated every {}s)",
|
||||
statistics.num_peers, config.cleaning.torrent_cleaning_interval
|
||||
);
|
||||
|
||||
if config.statistics.torrent_peer_histograms {
|
||||
println!(
|
||||
" peers per torrent (updated every {}s)",
|
||||
config.cleaning.torrent_cleaning_interval
|
||||
);
|
||||
println!(" min {:>10}", statistics.peer_histogram.min);
|
||||
println!(" p10 {:>10}", statistics.peer_histogram.p10);
|
||||
println!(" p20 {:>10}", statistics.peer_histogram.p20);
|
||||
println!(" p30 {:>10}", statistics.peer_histogram.p30);
|
||||
println!(" p40 {:>10}", statistics.peer_histogram.p40);
|
||||
println!(" p50 {:>10}", statistics.peer_histogram.p50);
|
||||
println!(" p60 {:>10}", statistics.peer_histogram.p60);
|
||||
println!(" p70 {:>10}", statistics.peer_histogram.p70);
|
||||
println!(" p80 {:>10}", statistics.peer_histogram.p80);
|
||||
println!(" p90 {:>10}", statistics.peer_histogram.p90);
|
||||
println!(" p95 {:>10}", statistics.peer_histogram.p95);
|
||||
println!(" p99 {:>10}", statistics.peer_histogram.p99);
|
||||
println!(" p99.9 {:>10}", statistics.peer_histogram.p999);
|
||||
println!(" max {:>10}", statistics.peer_histogram.max);
|
||||
}
|
||||
}
|
||||
|
||||
fn save_html_to_file(
|
||||
config: &Config,
|
||||
tt: &TinyTemplate,
|
||||
template_data: &TemplateData,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut file = File::create(&config.statistics.html_file_path).with_context(|| {
|
||||
format!(
|
||||
"File path: {}",
|
||||
&config.statistics.html_file_path.to_string_lossy()
|
||||
)
|
||||
})?;
|
||||
|
||||
write!(file, "{}", tt.render(TEMPLATE_KEY, template_data)?)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
200
crates/udp/src/workers/swarm/mod.rs
Normal file
200
crates/udp/src/workers/swarm/mod.rs
Normal file
|
|
@ -0,0 +1,200 @@
|
|||
mod storage;
|
||||
|
||||
use std::net::IpAddr;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::Duration;
|
||||
use std::time::Instant;
|
||||
|
||||
use aquatic_common::ServerStartInstant;
|
||||
use crossbeam_channel::Receiver;
|
||||
use crossbeam_channel::Sender;
|
||||
use rand::{rngs::SmallRng, SeedableRng};
|
||||
|
||||
use aquatic_common::{CanonicalSocketAddr, PanicSentinel, ValidUntil};
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
use storage::{TorrentMap, TorrentMaps};
|
||||
|
||||
pub fn run_swarm_worker(
|
||||
_sentinel: PanicSentinel,
|
||||
config: Config,
|
||||
state: State,
|
||||
server_start_instant: ServerStartInstant,
|
||||
request_receiver: Receiver<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>,
|
||||
response_sender: ConnectedResponseSender,
|
||||
statistics_sender: Sender<StatisticsMessage>,
|
||||
worker_index: SwarmWorkerIndex,
|
||||
) {
|
||||
let mut torrents = TorrentMaps::default();
|
||||
let mut rng = SmallRng::from_entropy();
|
||||
|
||||
let timeout = Duration::from_millis(config.request_channel_recv_timeout_ms);
|
||||
let mut peer_valid_until = ValidUntil::new(server_start_instant, config.cleaning.max_peer_age);
|
||||
|
||||
let cleaning_interval = Duration::from_secs(config.cleaning.torrent_cleaning_interval);
|
||||
let statistics_update_interval = Duration::from_secs(config.statistics.interval);
|
||||
|
||||
let mut last_cleaning = Instant::now();
|
||||
let mut last_statistics_update = Instant::now();
|
||||
|
||||
let mut iter_counter = 0usize;
|
||||
|
||||
loop {
|
||||
if let Ok((sender_index, request, src)) = request_receiver.recv_timeout(timeout) {
|
||||
let response = match (request, src.get().ip()) {
|
||||
(ConnectedRequest::Announce(request), IpAddr::V4(ip)) => {
|
||||
let response = handle_announce_request(
|
||||
&config,
|
||||
&mut rng,
|
||||
&statistics_sender,
|
||||
&mut torrents.ipv4,
|
||||
request,
|
||||
ip,
|
||||
peer_valid_until,
|
||||
);
|
||||
|
||||
ConnectedResponse::AnnounceIpv4(response)
|
||||
}
|
||||
(ConnectedRequest::Announce(request), IpAddr::V6(ip)) => {
|
||||
let response = handle_announce_request(
|
||||
&config,
|
||||
&mut rng,
|
||||
&statistics_sender,
|
||||
&mut torrents.ipv6,
|
||||
request,
|
||||
ip,
|
||||
peer_valid_until,
|
||||
);
|
||||
|
||||
ConnectedResponse::AnnounceIpv6(response)
|
||||
}
|
||||
(ConnectedRequest::Scrape(request), IpAddr::V4(_)) => {
|
||||
ConnectedResponse::Scrape(handle_scrape_request(&mut torrents.ipv4, request))
|
||||
}
|
||||
(ConnectedRequest::Scrape(request), IpAddr::V6(_)) => {
|
||||
ConnectedResponse::Scrape(handle_scrape_request(&mut torrents.ipv6, request))
|
||||
}
|
||||
};
|
||||
|
||||
response_sender.try_send_to(sender_index, response, src);
|
||||
}
|
||||
|
||||
// Run periodic tasks
|
||||
if iter_counter % 128 == 0 {
|
||||
let now = Instant::now();
|
||||
|
||||
peer_valid_until = ValidUntil::new(server_start_instant, config.cleaning.max_peer_age);
|
||||
|
||||
if now > last_cleaning + cleaning_interval {
|
||||
torrents.clean_and_update_statistics(
|
||||
&config,
|
||||
&state,
|
||||
&statistics_sender,
|
||||
&state.access_list,
|
||||
server_start_instant,
|
||||
worker_index,
|
||||
);
|
||||
|
||||
last_cleaning = now;
|
||||
}
|
||||
if config.statistics.active()
|
||||
&& now > last_statistics_update + statistics_update_interval
|
||||
{
|
||||
state.statistics_ipv4.torrents[worker_index.0]
|
||||
.store(torrents.ipv4.num_torrents(), Ordering::Release);
|
||||
state.statistics_ipv6.torrents[worker_index.0]
|
||||
.store(torrents.ipv6.num_torrents(), Ordering::Release);
|
||||
|
||||
last_statistics_update = now;
|
||||
}
|
||||
}
|
||||
|
||||
iter_counter = iter_counter.wrapping_add(1);
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_announce_request<I: Ip>(
|
||||
config: &Config,
|
||||
rng: &mut SmallRng,
|
||||
statistics_sender: &Sender<StatisticsMessage>,
|
||||
torrents: &mut TorrentMap<I>,
|
||||
request: AnnounceRequest,
|
||||
peer_ip: I,
|
||||
peer_valid_until: ValidUntil,
|
||||
) -> AnnounceResponse<I> {
|
||||
let max_num_peers_to_take: usize = if request.peers_wanted.0 <= 0 {
|
||||
config.protocol.max_response_peers
|
||||
} else {
|
||||
::std::cmp::min(
|
||||
config.protocol.max_response_peers,
|
||||
request.peers_wanted.0.try_into().unwrap(),
|
||||
)
|
||||
};
|
||||
|
||||
let torrent_data = torrents.0.entry(request.info_hash).or_default();
|
||||
|
||||
let peer_status = PeerStatus::from_event_and_bytes_left(request.event, request.bytes_left);
|
||||
|
||||
torrent_data.update_peer(
|
||||
config,
|
||||
statistics_sender,
|
||||
request.peer_id,
|
||||
peer_ip,
|
||||
request.port,
|
||||
peer_status,
|
||||
peer_valid_until,
|
||||
);
|
||||
|
||||
let response_peers = if let PeerStatus::Stopped = peer_status {
|
||||
Vec::new()
|
||||
} else {
|
||||
torrent_data.extract_response_peers(rng, request.peer_id, max_num_peers_to_take)
|
||||
};
|
||||
|
||||
AnnounceResponse {
|
||||
transaction_id: request.transaction_id,
|
||||
announce_interval: AnnounceInterval(config.protocol.peer_announce_interval),
|
||||
leechers: NumberOfPeers(torrent_data.num_leechers().try_into().unwrap_or(i32::MAX)),
|
||||
seeders: NumberOfPeers(torrent_data.num_seeders().try_into().unwrap_or(i32::MAX)),
|
||||
peers: response_peers,
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_scrape_request<I: Ip>(
|
||||
torrents: &mut TorrentMap<I>,
|
||||
request: PendingScrapeRequest,
|
||||
) -> PendingScrapeResponse {
|
||||
const EMPTY_STATS: TorrentScrapeStatistics = create_torrent_scrape_statistics(0, 0);
|
||||
|
||||
let torrent_stats = request
|
||||
.info_hashes
|
||||
.into_iter()
|
||||
.map(|(i, info_hash)| {
|
||||
let stats = torrents
|
||||
.0
|
||||
.get(&info_hash)
|
||||
.map(|torrent_data| torrent_data.scrape_statistics())
|
||||
.unwrap_or(EMPTY_STATS);
|
||||
|
||||
(i, stats)
|
||||
})
|
||||
.collect();
|
||||
|
||||
PendingScrapeResponse {
|
||||
slab_key: request.slab_key,
|
||||
torrent_stats,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
const fn create_torrent_scrape_statistics(seeders: i32, leechers: i32) -> TorrentScrapeStatistics {
|
||||
TorrentScrapeStatistics {
|
||||
seeders: NumberOfPeers(seeders),
|
||||
completed: NumberOfDownloads(0), // No implementation planned
|
||||
leechers: NumberOfPeers(leechers),
|
||||
}
|
||||
}
|
||||
402
crates/udp/src/workers/swarm/storage.rs
Normal file
402
crates/udp/src/workers/swarm/storage.rs
Normal file
|
|
@ -0,0 +1,402 @@
|
|||
use std::net::Ipv4Addr;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
use aquatic_common::IndexMap;
|
||||
use aquatic_common::SecondsSinceServerStart;
|
||||
use aquatic_common::ServerStartInstant;
|
||||
use aquatic_common::{
|
||||
access_list::{create_access_list_cache, AccessListArcSwap, AccessListCache, AccessListMode},
|
||||
extract_response_peers, ValidUntil,
|
||||
};
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
use crossbeam_channel::Sender;
|
||||
use hdrhistogram::Histogram;
|
||||
use rand::prelude::SmallRng;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
|
||||
use super::create_torrent_scrape_statistics;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct Peer<I: Ip> {
|
||||
ip_address: I,
|
||||
port: Port,
|
||||
is_seeder: bool,
|
||||
valid_until: ValidUntil,
|
||||
}
|
||||
|
||||
impl<I: Ip> Peer<I> {
|
||||
fn to_response_peer(&self) -> ResponsePeer<I> {
|
||||
ResponsePeer {
|
||||
ip_address: self.ip_address,
|
||||
port: self.port,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type PeerMap<I> = IndexMap<PeerId, Peer<I>>;
|
||||
|
||||
pub struct TorrentData<I: Ip> {
|
||||
peers: PeerMap<I>,
|
||||
num_seeders: usize,
|
||||
}
|
||||
|
||||
impl<I: Ip> TorrentData<I> {
|
||||
pub fn update_peer(
|
||||
&mut self,
|
||||
config: &Config,
|
||||
statistics_sender: &Sender<StatisticsMessage>,
|
||||
peer_id: PeerId,
|
||||
ip_address: I,
|
||||
port: Port,
|
||||
status: PeerStatus,
|
||||
valid_until: ValidUntil,
|
||||
) {
|
||||
let opt_removed_peer = match status {
|
||||
PeerStatus::Leeching => {
|
||||
let peer = Peer {
|
||||
ip_address,
|
||||
port,
|
||||
is_seeder: false,
|
||||
valid_until,
|
||||
};
|
||||
|
||||
self.peers.insert(peer_id, peer)
|
||||
}
|
||||
PeerStatus::Seeding => {
|
||||
let peer = Peer {
|
||||
ip_address,
|
||||
port,
|
||||
is_seeder: true,
|
||||
valid_until,
|
||||
};
|
||||
|
||||
self.num_seeders += 1;
|
||||
|
||||
self.peers.insert(peer_id, peer)
|
||||
}
|
||||
PeerStatus::Stopped => self.peers.remove(&peer_id),
|
||||
};
|
||||
|
||||
if config.statistics.peer_clients {
|
||||
match (status, opt_removed_peer.is_some()) {
|
||||
// We added a new peer
|
||||
(PeerStatus::Leeching | PeerStatus::Seeding, false) => {
|
||||
if let Err(_) =
|
||||
statistics_sender.try_send(StatisticsMessage::PeerAdded(peer_id))
|
||||
{
|
||||
// Should never happen in practice
|
||||
::log::error!("Couldn't send StatisticsMessage::PeerAdded");
|
||||
}
|
||||
}
|
||||
// We removed an existing peer
|
||||
(PeerStatus::Stopped, true) => {
|
||||
if let Err(_) =
|
||||
statistics_sender.try_send(StatisticsMessage::PeerRemoved(peer_id))
|
||||
{
|
||||
// Should never happen in practice
|
||||
::log::error!("Couldn't send StatisticsMessage::PeerRemoved");
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(Peer {
|
||||
is_seeder: true, ..
|
||||
}) = opt_removed_peer
|
||||
{
|
||||
self.num_seeders -= 1;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn extract_response_peers(
|
||||
&self,
|
||||
rng: &mut SmallRng,
|
||||
peer_id: PeerId,
|
||||
max_num_peers_to_take: usize,
|
||||
) -> Vec<ResponsePeer<I>> {
|
||||
extract_response_peers(
|
||||
rng,
|
||||
&self.peers,
|
||||
max_num_peers_to_take,
|
||||
peer_id,
|
||||
Peer::to_response_peer,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn num_leechers(&self) -> usize {
|
||||
self.peers.len() - self.num_seeders
|
||||
}
|
||||
|
||||
pub fn num_seeders(&self) -> usize {
|
||||
self.num_seeders
|
||||
}
|
||||
|
||||
pub fn scrape_statistics(&self) -> TorrentScrapeStatistics {
|
||||
create_torrent_scrape_statistics(
|
||||
self.num_seeders.try_into().unwrap_or(i32::MAX),
|
||||
self.num_leechers().try_into().unwrap_or(i32::MAX),
|
||||
)
|
||||
}
|
||||
|
||||
/// Remove inactive peers and reclaim space
|
||||
fn clean(
|
||||
&mut self,
|
||||
config: &Config,
|
||||
statistics_sender: &Sender<StatisticsMessage>,
|
||||
now: SecondsSinceServerStart,
|
||||
) {
|
||||
self.peers.retain(|peer_id, peer| {
|
||||
let keep = peer.valid_until.valid(now);
|
||||
|
||||
if !keep {
|
||||
if peer.is_seeder {
|
||||
self.num_seeders -= 1;
|
||||
}
|
||||
if config.statistics.peer_clients {
|
||||
if let Err(_) =
|
||||
statistics_sender.try_send(StatisticsMessage::PeerRemoved(*peer_id))
|
||||
{
|
||||
// Should never happen in practice
|
||||
::log::error!("Couldn't send StatisticsMessage::PeerRemoved");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
keep
|
||||
});
|
||||
|
||||
if !self.peers.is_empty() {
|
||||
self.peers.shrink_to_fit();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I: Ip> Default for TorrentData<I> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
peers: Default::default(),
|
||||
num_seeders: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct TorrentMap<I: Ip>(pub IndexMap<InfoHash, TorrentData<I>>);
|
||||
|
||||
impl<I: Ip> TorrentMap<I> {
|
||||
/// Remove forbidden or inactive torrents, reclaim space and return number of remaining peers
|
||||
fn clean_and_get_statistics(
|
||||
&mut self,
|
||||
config: &Config,
|
||||
statistics_sender: &Sender<StatisticsMessage>,
|
||||
access_list_cache: &mut AccessListCache,
|
||||
access_list_mode: AccessListMode,
|
||||
now: SecondsSinceServerStart,
|
||||
) -> (usize, Option<Histogram<u64>>) {
|
||||
let mut num_peers = 0;
|
||||
|
||||
let mut opt_histogram: Option<Histogram<u64>> = if config.statistics.torrent_peer_histograms
|
||||
{
|
||||
match Histogram::new(3) {
|
||||
Ok(histogram) => Some(histogram),
|
||||
Err(err) => {
|
||||
::log::error!("Couldn't create peer histogram: {:#}", err);
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
self.0.retain(|info_hash, torrent| {
|
||||
if !access_list_cache
|
||||
.load()
|
||||
.allows(access_list_mode, &info_hash.0)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
torrent.clean(config, statistics_sender, now);
|
||||
|
||||
num_peers += torrent.peers.len();
|
||||
|
||||
match opt_histogram {
|
||||
Some(ref mut histogram) if torrent.peers.len() != 0 => {
|
||||
let n = torrent
|
||||
.peers
|
||||
.len()
|
||||
.try_into()
|
||||
.expect("Couldn't fit usize into u64");
|
||||
|
||||
if let Err(err) = histogram.record(n) {
|
||||
::log::error!("Couldn't record {} to histogram: {:#}", n, err);
|
||||
}
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
|
||||
!torrent.peers.is_empty()
|
||||
});
|
||||
|
||||
self.0.shrink_to_fit();
|
||||
|
||||
(num_peers, opt_histogram)
|
||||
}
|
||||
|
||||
pub fn num_torrents(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TorrentMaps {
|
||||
pub ipv4: TorrentMap<Ipv4Addr>,
|
||||
pub ipv6: TorrentMap<Ipv6Addr>,
|
||||
}
|
||||
|
||||
impl Default for TorrentMaps {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
ipv4: TorrentMap(Default::default()),
|
||||
ipv6: TorrentMap(Default::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TorrentMaps {
|
||||
/// Remove forbidden or inactive torrents, reclaim space and update statistics
|
||||
pub fn clean_and_update_statistics(
|
||||
&mut self,
|
||||
config: &Config,
|
||||
state: &State,
|
||||
statistics_sender: &Sender<StatisticsMessage>,
|
||||
access_list: &Arc<AccessListArcSwap>,
|
||||
server_start_instant: ServerStartInstant,
|
||||
worker_index: SwarmWorkerIndex,
|
||||
) {
|
||||
let mut cache = create_access_list_cache(access_list);
|
||||
let mode = config.access_list.mode;
|
||||
let now = server_start_instant.seconds_elapsed();
|
||||
|
||||
let ipv4 =
|
||||
self.ipv4
|
||||
.clean_and_get_statistics(config, statistics_sender, &mut cache, mode, now);
|
||||
let ipv6 =
|
||||
self.ipv6
|
||||
.clean_and_get_statistics(config, statistics_sender, &mut cache, mode, now);
|
||||
|
||||
if config.statistics.active() {
|
||||
state.statistics_ipv4.peers[worker_index.0].store(ipv4.0, Ordering::Release);
|
||||
state.statistics_ipv6.peers[worker_index.0].store(ipv6.0, Ordering::Release);
|
||||
|
||||
if let Some(message) = ipv4.1.map(StatisticsMessage::Ipv4PeerHistogram) {
|
||||
if let Err(err) = statistics_sender.try_send(message) {
|
||||
::log::error!("couldn't send statistics message: {:#}", err);
|
||||
}
|
||||
}
|
||||
if let Some(message) = ipv6.1.map(StatisticsMessage::Ipv6PeerHistogram) {
|
||||
if let Err(err) = statistics_sender.try_send(message) {
|
||||
::log::error!("couldn't send statistics message: {:#}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::net::Ipv4Addr;

    use quickcheck::{quickcheck, TestResult};
    use rand::thread_rng;

    use super::*;

    /// Build a peer id whose first four bytes encode `i`
    fn gen_peer_id(i: u32) -> PeerId {
        let mut peer_id = PeerId([0; 20]);

        peer_id.0[0..4].copy_from_slice(&i.to_ne_bytes());

        peer_id
    }

    /// Build a non-seeding peer with an IPv4 address derived from `i`
    fn gen_peer(i: u32) -> Peer<Ipv4Addr> {
        Peer {
            ip_address: Ipv4Addr::from(i.to_be_bytes()),
            port: Port(1),
            is_seeder: false,
            valid_until: ValidUntil::new(ServerStartInstant::new(), 0),
        }
    }

    #[test]
    fn test_extract_response_peers() {
        fn prop(data: (u16, u16)) -> TestResult {
            let gen_num_peers = u32::from(data.0);
            let req_num_peers = usize::from(data.1);

            let mut peer_map: PeerMap<Ipv4Addr> = Default::default();

            let mut opt_sender_key = None;
            let mut opt_sender_peer = None;

            for i in 0..gen_num_peers {
                let key = gen_peer_id(i);
                let peer = gen_peer((i << 16) + i);

                // Remember the first peer: it plays the role of the
                // announcing sender, which must not be returned
                if i == 0 {
                    opt_sender_key = Some(key);
                    opt_sender_peer = Some(peer.to_response_peer());
                }

                peer_map.insert(key, peer);
            }

            let mut rng = thread_rng();

            let peers = extract_response_peers(
                &mut rng,
                &peer_map,
                req_num_peers,
                opt_sender_key.unwrap_or_else(|| gen_peer_id(1)),
                Peer::to_response_peer,
            );

            // Returned count must never exceed the request; when more were
            // requested than exist, (almost) all peers must be returned —
            // the sender itself may be excluded
            let mut success = peers.len() <= req_num_peers;

            if req_num_peers >= gen_num_peers as usize {
                success &= peers.len() == gen_num_peers as usize
                    || peers.len() + 1 == gen_num_peers as usize;
            }

            // Returned peers must be unique and must not include the sender
            let mut ip_addresses = HashSet::with_capacity(peers.len());

            for peer in peers {
                if peer == opt_sender_peer.clone().unwrap()
                    || ip_addresses.contains(&peer.ip_address)
                {
                    success = false;

                    break;
                }

                ip_addresses.insert(peer.ip_address);
            }

            TestResult::from_bool(success)
        }

        quickcheck(prop as fn((u16, u16)) -> TestResult);
    }
}
|
||||
22
crates/udp/templates/statistics.css
Normal file
22
crates/udp/templates/statistics.css
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
/* Stylesheet inlined into the statistics HTML template */

body {
    font-family: arial, sans-serif;
    font-size: 16px;
}

table {
    border-collapse: collapse;
}

caption {
    caption-side: bottom;
    padding-top: 0.5rem;
}

th, td {
    padding: 0.5rem 2rem;
    border: 1px solid #ccc;
}

th {
    background-color: #eee;
}
|
||||
278
crates/udp/templates/statistics.html
Normal file
278
crates/udp/templates/statistics.html
Normal file
|
|
@ -0,0 +1,278 @@
|
|||
<!doctype html>
|
||||
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
||||
<title>UDP BitTorrent tracker statistics</title>
|
||||
|
||||
{#- Include stylesheet like this to prevent code editor syntax warnings #}
|
||||
{ stylesheet | unescaped }
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<h1>UDP BitTorrent tracker statistics</h1>
|
||||
|
||||
{#- <p> <strong>Tracker software:</strong> <a href="https://github.com/greatest-ape/aquatic">aquatic_udp</a> </p> #}
|
||||
|
||||
<p>
|
||||
<strong>Updated:</strong> { last_updated } (UTC)
|
||||
</p>
|
||||
|
||||
{{ if ipv4_active }}
|
||||
|
||||
<h2>IPv4</h2>
|
||||
|
||||
<table>
|
||||
<caption>* Peer count is updated every { peer_update_interval } seconds</caption>
|
||||
<tr>
|
||||
<th scope="row">Number of torrents</th>
|
||||
<td>{ ipv4.num_torrents }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Number of peers</th>
|
||||
<td>{ ipv4.num_peers } *</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Requests / second</th>
|
||||
<td>{ ipv4.requests_per_second }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Total responses / second</th>
|
||||
<td>{ ipv4.responses_per_second_total }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Connect responses / second</th>
|
||||
<td>{ ipv4.responses_per_second_connect }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Announce responses / second</th>
|
||||
<td>{ ipv4.responses_per_second_announce }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Scrape responses / second</th>
|
||||
<td>{ ipv4.responses_per_second_scrape }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Error responses / second</th>
|
||||
<td>{ ipv4.responses_per_second_error }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Bandwidth (RX)</th>
|
||||
<td>{ ipv4.rx_mbits } mbit/s</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Bandwidth (TX)</th>
|
||||
<td>{ ipv4.tx_mbits } mbit/s</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
{{ if extended_active }}
|
||||
|
||||
<h3>Peers per torrent</h3>
|
||||
|
||||
<table>
|
||||
<caption>Updated every { peer_update_interval } seconds</caption>
|
||||
<tr>
|
||||
<th scope="row">Minimum</th>
|
||||
<td>{ ipv4.peer_histogram.min }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">10th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p10 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">20th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p20 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">30th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p30 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">40th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p40 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">50th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p50 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">60th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p60 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">70th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p70 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">80th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p80 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">90th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p90 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">95th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p95 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">99th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p99 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">99.9th percentile</th>
|
||||
<td>{ ipv4.peer_histogram.p999 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Maximum</th>
|
||||
<td>{ ipv4.peer_histogram.max }</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
{{ endif }}
|
||||
|
||||
{{ endif }}
|
||||
|
||||
{{ if ipv6_active }}
|
||||
|
||||
<h2>IPv6</h2>
|
||||
|
||||
<table>
|
||||
<caption>* Peer count is updated every { peer_update_interval } seconds</caption>
|
||||
<tr>
|
||||
<th scope="row">Number of torrents</th>
|
||||
<td>{ ipv6.num_torrents }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Number of peers</th>
|
||||
<td>{ ipv6.num_peers } *</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Requests / second</th>
|
||||
<td>{ ipv6.requests_per_second }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Total responses / second</th>
|
||||
<td>{ ipv6.responses_per_second_total }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Connect responses / second</th>
|
||||
<td>{ ipv6.responses_per_second_connect }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Announce responses / second</th>
|
||||
<td>{ ipv6.responses_per_second_announce }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Scrape responses / second</th>
|
||||
<td>{ ipv6.responses_per_second_scrape }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Error responses / second</th>
|
||||
<td>{ ipv6.responses_per_second_error }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Bandwidth (RX)</th>
|
||||
<td>{ ipv6.rx_mbits } mbit/s</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Bandwidth (TX)</th>
|
||||
<td>{ ipv6.tx_mbits } mbit/s</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
{{ if extended_active }}
|
||||
|
||||
<h3>Peers per torrent</h3>
|
||||
|
||||
<table>
|
||||
<caption>Updated every { peer_update_interval } seconds</caption>
|
||||
<tr>
|
||||
<th scope="row">Minimum</th>
|
||||
<td>{ ipv6.peer_histogram.min }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">10th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p10 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">20th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p20 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">30th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p30 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">40th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p40 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">50th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p50 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">60th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p60 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">70th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p70 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">80th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p80 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">90th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p90 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">95th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p95 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">99th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p99 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">99.9th percentile</th>
|
||||
<td>{ ipv6.peer_histogram.p999 }</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th scope="row">Maximum</th>
|
||||
<td>{ ipv6.peer_histogram.max }</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
{{ endif }}
|
||||
|
||||
{{ endif }}
|
||||
|
||||
{{ if extended_active }}
|
||||
|
||||
<h2>Peer clients</h2>
|
||||
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Client</th>
|
||||
<th>Count</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ for value in peer_clients }}
|
||||
<tr>
|
||||
<td>{ value.0 }</td>
|
||||
<td>{ value.1 }</td>
|
||||
</tr>
|
||||
{{ endfor }}
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
{{ endif }}
|
||||
</body>
|
||||
</html>
|
||||
108
crates/udp/tests/access_list.rs
Normal file
108
crates/udp/tests/access_list.rs
Normal file
|
|
@ -0,0 +1,108 @@
|
|||
mod common;
|
||||
|
||||
use common::*;
|
||||
|
||||
use std::{
|
||||
fs::File,
|
||||
io::Write,
|
||||
net::{Ipv4Addr, SocketAddr, SocketAddrV4, UdpSocket},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_common::access_list::AccessListMode;
|
||||
use aquatic_udp::config::Config;
|
||||
use aquatic_udp_protocol::{InfoHash, Response};
|
||||
|
||||
/// Tracker in deny-list mode: an announce for the listed info hash must be
/// rejected, while an announce for any other info hash must succeed.
#[test]
fn test_access_list_deny() -> anyhow::Result<()> {
    const TRACKER_PORT: u16 = 40_113;

    let denied_hash = InfoHash([0; 20]);
    let allowed_hash = InfoHash([1; 20]);

    // success = the hash NOT in the list, fail = the listed hash.
    test_access_list(
        TRACKER_PORT,
        allowed_hash,
        denied_hash,
        denied_hash,
        AccessListMode::Deny,
    )?;

    Ok(())
}
|
||||
|
||||
/// Tracker in allow-list mode: an announce for the listed info hash must
/// succeed, while an announce for any other info hash must be rejected.
#[test]
fn test_access_list_allow() -> anyhow::Result<()> {
    const TRACKER_PORT: u16 = 40_114;

    let allowed_hash = InfoHash([0; 20]);
    let denied_hash = InfoHash([1; 20]);

    // success = the listed hash, fail = the hash NOT in the list.
    test_access_list(
        TRACKER_PORT,
        allowed_hash,
        denied_hash,
        allowed_hash,
        AccessListMode::Allow,
    )?;

    Ok(())
}
|
||||
|
||||
fn test_access_list(
|
||||
tracker_port: u16,
|
||||
info_hash_success: InfoHash,
|
||||
info_hash_fail: InfoHash,
|
||||
info_hash_in_list: InfoHash,
|
||||
mode: AccessListMode,
|
||||
) -> anyhow::Result<()> {
|
||||
let access_list_dir = tempfile::tempdir().with_context(|| "get temporary directory")?;
|
||||
let access_list_path = access_list_dir.path().join("access-list.txt");
|
||||
|
||||
let mut access_list_file =
|
||||
File::create(&access_list_path).with_context(|| "create access list file")?;
|
||||
writeln!(
|
||||
access_list_file,
|
||||
"{}",
|
||||
hex::encode_upper(info_hash_in_list.0)
|
||||
)
|
||||
.with_context(|| "write to access list file")?;
|
||||
|
||||
let mut config = Config::default();
|
||||
|
||||
config.network.address.set_port(tracker_port);
|
||||
|
||||
config.access_list.mode = mode;
|
||||
config.access_list.path = access_list_path;
|
||||
|
||||
run_tracker(config);
|
||||
|
||||
let tracker_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, tracker_port));
|
||||
let peer_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0));
|
||||
|
||||
let socket = UdpSocket::bind(peer_addr)?;
|
||||
socket.set_read_timeout(Some(Duration::from_secs(1)))?;
|
||||
|
||||
let connection_id = connect(&socket, tracker_addr).with_context(|| "connect")?;
|
||||
|
||||
let response = announce(
|
||||
&socket,
|
||||
tracker_addr,
|
||||
connection_id,
|
||||
1,
|
||||
info_hash_fail,
|
||||
10,
|
||||
false,
|
||||
)
|
||||
.with_context(|| "announce")?;
|
||||
|
||||
assert!(
|
||||
matches!(response, Response::Error(_)),
|
||||
"response should be error but is {:?}",
|
||||
response
|
||||
);
|
||||
|
||||
let response = announce(
|
||||
&socket,
|
||||
tracker_addr,
|
||||
connection_id,
|
||||
1,
|
||||
info_hash_success,
|
||||
10,
|
||||
false,
|
||||
)
|
||||
.with_context(|| "announce")?;
|
||||
|
||||
assert!(matches!(response, Response::AnnounceIpv4(_)));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
123
crates/udp/tests/common/mod.rs
Normal file
123
crates/udp/tests/common/mod.rs
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
#![allow(dead_code)]
|
||||
|
||||
use std::{
|
||||
io::Cursor,
|
||||
net::{SocketAddr, UdpSocket},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_udp::{common::BUFFER_SIZE, config::Config};
|
||||
use aquatic_udp_protocol::{
|
||||
common::PeerId, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, InfoHash,
|
||||
NumberOfBytes, NumberOfPeers, PeerKey, Port, Request, Response, ScrapeRequest, ScrapeResponse,
|
||||
TransactionId,
|
||||
};
|
||||
|
||||
// FIXME: should ideally try different ports and use sync primitives to find
|
||||
// out if tracker was successfully started
|
||||
pub fn run_tracker(config: Config) {
|
||||
::std::thread::spawn(move || {
|
||||
aquatic_udp::run(config).unwrap();
|
||||
});
|
||||
|
||||
::std::thread::sleep(Duration::from_secs(1));
|
||||
}
|
||||
|
||||
pub fn connect(socket: &UdpSocket, tracker_addr: SocketAddr) -> anyhow::Result<ConnectionId> {
|
||||
let request = Request::Connect(ConnectRequest {
|
||||
transaction_id: TransactionId(0),
|
||||
});
|
||||
|
||||
let response = request_and_response(&socket, tracker_addr, request)?;
|
||||
|
||||
if let Response::Connect(response) = response {
|
||||
Ok(response.connection_id)
|
||||
} else {
|
||||
Err(anyhow::anyhow!("not connect response: {:?}", response))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn announce(
|
||||
socket: &UdpSocket,
|
||||
tracker_addr: SocketAddr,
|
||||
connection_id: ConnectionId,
|
||||
peer_port: u16,
|
||||
info_hash: InfoHash,
|
||||
peers_wanted: usize,
|
||||
seeder: bool,
|
||||
) -> anyhow::Result<Response> {
|
||||
let mut peer_id = PeerId([0; 20]);
|
||||
|
||||
for chunk in peer_id.0.chunks_exact_mut(2) {
|
||||
chunk.copy_from_slice(&peer_port.to_ne_bytes());
|
||||
}
|
||||
|
||||
let request = Request::Announce(AnnounceRequest {
|
||||
connection_id,
|
||||
transaction_id: TransactionId(0),
|
||||
info_hash,
|
||||
peer_id,
|
||||
bytes_downloaded: NumberOfBytes(0),
|
||||
bytes_uploaded: NumberOfBytes(0),
|
||||
bytes_left: NumberOfBytes(if seeder { 0 } else { 1 }),
|
||||
event: AnnounceEvent::Started,
|
||||
ip_address: None,
|
||||
key: PeerKey(0),
|
||||
peers_wanted: NumberOfPeers(peers_wanted as i32),
|
||||
port: Port(peer_port),
|
||||
});
|
||||
|
||||
Ok(request_and_response(&socket, tracker_addr, request)?)
|
||||
}
|
||||
|
||||
pub fn scrape(
|
||||
socket: &UdpSocket,
|
||||
tracker_addr: SocketAddr,
|
||||
connection_id: ConnectionId,
|
||||
info_hashes: Vec<InfoHash>,
|
||||
) -> anyhow::Result<ScrapeResponse> {
|
||||
let request = Request::Scrape(ScrapeRequest {
|
||||
connection_id,
|
||||
transaction_id: TransactionId(0),
|
||||
info_hashes,
|
||||
});
|
||||
|
||||
let response = request_and_response(&socket, tracker_addr, request)?;
|
||||
|
||||
if let Response::Scrape(response) = response {
|
||||
Ok(response)
|
||||
} else {
|
||||
return Err(anyhow::anyhow!("not scrape response: {:?}", response));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn request_and_response(
|
||||
socket: &UdpSocket,
|
||||
tracker_addr: SocketAddr,
|
||||
request: Request,
|
||||
) -> anyhow::Result<Response> {
|
||||
let mut buffer = [0u8; BUFFER_SIZE];
|
||||
|
||||
{
|
||||
let mut buffer = Cursor::new(&mut buffer[..]);
|
||||
|
||||
request
|
||||
.write(&mut buffer)
|
||||
.with_context(|| "write request")?;
|
||||
|
||||
let bytes_written = buffer.position() as usize;
|
||||
|
||||
socket
|
||||
.send_to(&(buffer.into_inner())[..bytes_written], tracker_addr)
|
||||
.with_context(|| "send request")?;
|
||||
}
|
||||
|
||||
{
|
||||
let (bytes_read, _) = socket
|
||||
.recv_from(&mut buffer)
|
||||
.with_context(|| "recv response")?;
|
||||
|
||||
Ok(Response::from_bytes(&buffer[..bytes_read], true).with_context(|| "parse response")?)
|
||||
}
|
||||
}
|
||||
94
crates/udp/tests/invalid_connection_id.rs
Normal file
94
crates/udp/tests/invalid_connection_id.rs
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
mod common;
|
||||
|
||||
use common::*;
|
||||
|
||||
use std::{
|
||||
io::{Cursor, ErrorKind},
|
||||
net::{Ipv4Addr, SocketAddr, SocketAddrV4, UdpSocket},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_udp::{common::BUFFER_SIZE, config::Config};
|
||||
use aquatic_udp_protocol::{
|
||||
common::PeerId, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes,
|
||||
NumberOfPeers, PeerKey, Port, Request, ScrapeRequest, TransactionId,
|
||||
};
|
||||
|
||||
/// Announce and scrape requests carrying a connection id the tracker never
/// issued must get no reply at all, while a valid connect request still
/// receives a response.
#[test]
fn test_invalid_connection_id() -> anyhow::Result<()> {
    const TRACKER_PORT: u16 = 40_112;

    let mut config = Config::default();
    config.network.address.set_port(TRACKER_PORT);

    run_tracker(config);

    let tracker_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, TRACKER_PORT));
    let peer_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0));

    let socket = UdpSocket::bind(peer_addr)?;
    socket.set_read_timeout(Some(Duration::from_secs(1)))?;

    // Send connect request to make sure that the tracker in fact responds to
    // valid requests
    let connection_id = connect(&socket, tracker_addr).with_context(|| "connect")?;

    // Flipping every bit guarantees the id differs from the one issued.
    let invalid_connection_id = ConnectionId(!connection_id.0);

    let announce_request = Request::Announce(AnnounceRequest {
        connection_id: invalid_connection_id,
        transaction_id: TransactionId(0),
        info_hash: InfoHash([0; 20]),
        peer_id: PeerId([0; 20]),
        bytes_downloaded: NumberOfBytes(0),
        bytes_uploaded: NumberOfBytes(0),
        bytes_left: NumberOfBytes(0),
        event: AnnounceEvent::Started,
        ip_address: None,
        key: PeerKey(0),
        peers_wanted: NumberOfPeers(10),
        port: Port(1),
    });

    let scrape_request = Request::Scrape(ScrapeRequest {
        connection_id: invalid_connection_id,
        transaction_id: TransactionId(0),
        info_hashes: vec![InfoHash([0; 20])],
    });

    no_response(&socket, tracker_addr, announce_request).with_context(|| "announce")?;
    no_response(&socket, tracker_addr, scrape_request).with_context(|| "scrape")?;

    Ok(())
}
|
||||
|
||||
fn no_response(
|
||||
socket: &UdpSocket,
|
||||
tracker_addr: SocketAddr,
|
||||
request: Request,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut buffer = [0u8; BUFFER_SIZE];
|
||||
|
||||
{
|
||||
let mut buffer = Cursor::new(&mut buffer[..]);
|
||||
|
||||
request
|
||||
.write(&mut buffer)
|
||||
.with_context(|| "write request")?;
|
||||
|
||||
let bytes_written = buffer.position() as usize;
|
||||
|
||||
socket
|
||||
.send_to(&(buffer.into_inner())[..bytes_written], tracker_addr)
|
||||
.with_context(|| "send request")?;
|
||||
}
|
||||
|
||||
match socket.recv_from(&mut buffer) {
|
||||
Ok(_) => Err(anyhow::anyhow!("received response")),
|
||||
Err(err) if err.kind() == ErrorKind::WouldBlock => Ok(()),
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
}
|
||||
99
crates/udp/tests/requests_responses.rs
Normal file
99
crates/udp/tests/requests_responses.rs
Normal file
|
|
@ -0,0 +1,99 @@
|
|||
mod common;
|
||||
|
||||
use common::*;
|
||||
|
||||
use std::{
|
||||
collections::{hash_map::RandomState, HashSet},
|
||||
net::{Ipv4Addr, SocketAddr, SocketAddrV4, UdpSocket},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_udp::config::Config;
|
||||
use aquatic_udp_protocol::{InfoHash, Response};
|
||||
|
||||
/// Register 20 peers one at a time (every third a seeder) and verify after
/// each announce that the returned peer list, seeder/leecher counts and
/// scrape statistics agree with everything announced so far.
#[test]
fn test_multiple_connect_announce_scrape() -> anyhow::Result<()> {
    const TRACKER_PORT: u16 = 40_111;
    const PEER_PORT_START: u16 = 30_000;
    const PEERS_WANTED: usize = 10;

    let mut config = Config::default();
    config.network.address.set_port(TRACKER_PORT);

    run_tracker(config);

    let tracker_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, TRACKER_PORT));
    let peer_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0));

    let info_hash = InfoHash([0; 20]);

    let mut expected_seeders = 0;
    let mut expected_leechers = 0;

    for i in 0..20 {
        // Every third simulated peer is a seeder.
        let is_seeder = i % 3 == 0;

        if is_seeder {
            expected_seeders += 1;
        } else {
            expected_leechers += 1;
        }

        let socket = UdpSocket::bind(peer_addr)?;
        socket.set_read_timeout(Some(Duration::from_secs(1)))?;

        let connection_id = connect(&socket, tracker_addr).with_context(|| "connect")?;

        let response = announce(
            &socket,
            tracker_addr,
            connection_id,
            PEER_PORT_START + i as u16,
            info_hash,
            PEERS_WANTED,
            is_seeder,
        )
        .with_context(|| "announce")?;

        let announce_response = match response {
            Response::AnnounceIpv4(response) => response,
            other => return Err(anyhow::anyhow!("not announce response: {:?}", other)),
        };

        // After `i` earlier announces, at most `i` peers (capped by the
        // number requested) are expected back.
        assert_eq!(announce_response.peers.len(), i.min(PEERS_WANTED));

        assert_eq!(announce_response.seeders.0, expected_seeders);
        assert_eq!(announce_response.leechers.0, expected_leechers);

        // Each earlier peer announced from a unique, predictable port.
        let response_peer_ports: HashSet<u16, RandomState> =
            HashSet::from_iter(announce_response.peers.iter().map(|p| p.port.0));
        let expected_peer_ports: HashSet<u16, RandomState> =
            HashSet::from_iter((0..i).map(|i| PEER_PORT_START + i as u16));

        if i > PEERS_WANTED {
            // A subset was returned; we can only check it is a valid one.
            assert!(response_peer_ports.is_subset(&expected_peer_ports));
        } else {
            assert_eq!(response_peer_ports, expected_peer_ports);
        }

        let scrape_response = scrape(
            &socket,
            tracker_addr,
            connection_id,
            vec![info_hash, InfoHash([1; 20])],
        )
        .with_context(|| "scrape")?;

        // First hash mirrors the announced swarm; second hash is unknown.
        assert_eq!(scrape_response.torrent_stats[0].seeders.0, expected_seeders);
        assert_eq!(
            scrape_response.torrent_stats[0].leechers.0,
            expected_leechers
        );
        assert_eq!(scrape_response.torrent_stats[1].seeders.0, 0);
        assert_eq!(scrape_response.torrent_stats[1].leechers.0, 0);
    }

    Ok(())
}
|
||||
27
crates/udp_bench/Cargo.toml
Normal file
27
crates/udp_bench/Cargo.toml
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
[package]
|
||||
name = "aquatic_udp_bench"
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "aquatic_udp_bench"
|
||||
|
||||
[dependencies]
|
||||
aquatic_common.workspace = true
|
||||
aquatic_toml_config.workspace = true
|
||||
aquatic_udp.workspace = true
|
||||
aquatic_udp_protocol.workspace = true
|
||||
|
||||
anyhow = "1"
|
||||
crossbeam-channel = "0.5"
|
||||
indicatif = "0.17"
|
||||
mimalloc = { version = "0.1", default-features = false }
|
||||
num-format = "0.4"
|
||||
rand_distr = "0.4"
|
||||
rand = { version = "0.8", features = ["small_rng"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
115
crates/udp_bench/src/announce.rs
Normal file
115
crates/udp_bench/src/announce.rs
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use aquatic_common::CanonicalSocketAddr;
|
||||
use crossbeam_channel::{Receiver, Sender};
|
||||
use indicatif::ProgressIterator;
|
||||
use rand::Rng;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
use aquatic_udp::common::*;
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::BenchConfig;
|
||||
|
||||
pub fn bench_announce_handler(
|
||||
bench_config: &BenchConfig,
|
||||
request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>,
|
||||
response_receiver: &Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
|
||||
rng: &mut impl Rng,
|
||||
info_hashes: &[InfoHash],
|
||||
) -> (usize, Duration) {
|
||||
let requests = create_requests(rng, info_hashes, bench_config.num_announce_requests);
|
||||
|
||||
let p = 10_000 * bench_config.num_threads; // FIXME: adjust to sharded workers
|
||||
let mut num_responses = 0usize;
|
||||
|
||||
let mut dummy: u16 = rng.gen();
|
||||
|
||||
let pb = create_progress_bar("Announce", bench_config.num_rounds as u64);
|
||||
|
||||
// Start benchmark
|
||||
|
||||
let before = Instant::now();
|
||||
|
||||
for round in (0..bench_config.num_rounds).progress_with(pb) {
|
||||
for request_chunk in requests.chunks(p) {
|
||||
for (request, src) in request_chunk {
|
||||
request_sender
|
||||
.send((
|
||||
SocketWorkerIndex(0),
|
||||
ConnectedRequest::Announce(request.clone()),
|
||||
*src,
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
while let Ok((ConnectedResponse::AnnounceIpv4(r), _)) = response_receiver.try_recv() {
|
||||
num_responses += 1;
|
||||
|
||||
if let Some(last_peer) = r.peers.last() {
|
||||
dummy ^= last_peer.port.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let total = bench_config.num_announce_requests * (round + 1);
|
||||
|
||||
while num_responses < total {
|
||||
if let Ok((ConnectedResponse::AnnounceIpv4(r), _)) = response_receiver.recv() {
|
||||
num_responses += 1;
|
||||
|
||||
if let Some(last_peer) = r.peers.last() {
|
||||
dummy ^= last_peer.port.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let elapsed = before.elapsed();
|
||||
|
||||
if dummy == 0 {
|
||||
println!("dummy dummy");
|
||||
}
|
||||
|
||||
(num_responses, elapsed)
|
||||
}
|
||||
|
||||
pub fn create_requests(
|
||||
rng: &mut impl Rng,
|
||||
info_hashes: &[InfoHash],
|
||||
number: usize,
|
||||
) -> Vec<(AnnounceRequest, CanonicalSocketAddr)> {
|
||||
let gamma = Gamma::new(GAMMA_SHAPE, GAMMA_SCALE).unwrap();
|
||||
|
||||
let max_index = info_hashes.len() - 1;
|
||||
|
||||
let mut requests = Vec::new();
|
||||
|
||||
for _ in 0..number {
|
||||
let info_hash_index = gamma_usize(rng, gamma, max_index);
|
||||
|
||||
let request = AnnounceRequest {
|
||||
connection_id: ConnectionId(0),
|
||||
transaction_id: TransactionId(rng.gen()),
|
||||
info_hash: info_hashes[info_hash_index],
|
||||
peer_id: PeerId(rng.gen()),
|
||||
bytes_downloaded: NumberOfBytes(rng.gen()),
|
||||
bytes_uploaded: NumberOfBytes(rng.gen()),
|
||||
bytes_left: NumberOfBytes(rng.gen()),
|
||||
event: AnnounceEvent::Started,
|
||||
ip_address: None,
|
||||
key: PeerKey(rng.gen()),
|
||||
peers_wanted: NumberOfPeers(rng.gen()),
|
||||
port: Port(rng.gen()),
|
||||
};
|
||||
|
||||
requests.push((
|
||||
request,
|
||||
CanonicalSocketAddr::new(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 1))),
|
||||
));
|
||||
}
|
||||
|
||||
requests
|
||||
}
|
||||
24
crates/udp_bench/src/common.rs
Normal file
24
crates/udp_bench/src/common.rs
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use rand::Rng;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
pub const GAMMA_SHAPE: f64 = 0.2;
|
||||
pub const GAMMA_SCALE: f64 = 100.0;
|
||||
|
||||
pub const NUM_INFO_HASHES: usize = 10_000;
|
||||
|
||||
pub fn create_progress_bar(name: &str, iterations: u64) -> ProgressBar {
|
||||
let t = format!("{:<8} {}", name, "{wide_bar} {pos:>2}/{len:>2}");
|
||||
let style = ProgressStyle::default_bar()
|
||||
.template(&t)
|
||||
.expect("setup indicatif template");
|
||||
|
||||
ProgressBar::new(iterations).with_style(style)
|
||||
}
|
||||
|
||||
/// Map a sample from `gamma` onto an index in `0..=max`.
///
/// The sample is capped at 101.0, then shifted and scaled into (roughly) the
/// unit interval and multiplied by `max`. Samples below 1.0 map to index 0:
/// the original silently relied on Rust's saturating negative-float-to-usize
/// cast for this; the lower clamp is now explicit (same result, clearer
/// intent).
pub fn gamma_usize(rng: &mut impl Rng, gamma: Gamma<f64>, max: usize) -> usize {
    let sample: f64 = rng.sample(gamma);
    let fraction = ((sample.min(101.0f64) - 1.0) / 100.0).max(0.0);

    (fraction * max as f64) as usize
}
|
||||
35
crates/udp_bench/src/config.rs
Normal file
35
crates/udp_bench/src/config.rs
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
use aquatic_toml_config::TomlConfig;
|
||||
use serde::Deserialize;
|
||||
|
||||
// Runtime configuration for aquatic_udp_bench, deserialized from TOML.
// Unknown keys are rejected via `deny_unknown_fields`. (Plain `//` comments
// on purpose: `///` doc comments are attributes and could be picked up by
// the TomlConfig derive, changing generated config output.)
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct BenchConfig {
    // Rounds each benchmark runs before reporting
    pub num_rounds: usize,
    // Scales request batch size (handlers use 10_000 * num_threads)
    pub num_threads: usize,
    // Requests generated per round, per request type. The connect count is
    // not consumed in the handlers shown here — presumably used elsewhere.
    pub num_connect_requests: usize,
    pub num_announce_requests: usize,
    pub num_scrape_requests: usize,
    // Info hashes packed into each generated scrape request
    pub num_hashes_per_scrape_request: usize,
}

impl Default for BenchConfig {
    fn default() -> Self {
        Self {
            num_rounds: 10,
            num_threads: 2,
            num_connect_requests: 5_000_000,
            num_announce_requests: 2_000_000,
            num_scrape_requests: 2_000_000,
            num_hashes_per_scrape_request: 20,
        }
    }
}

// Marker impl so the shared CLI machinery can load this config type.
impl aquatic_common::cli::Config for BenchConfig {}

#[cfg(test)]
mod tests {
    use super::BenchConfig;

    // Macro-generated round-trip test: serialize the default config to TOML
    // and deserialize it back.
    ::aquatic_toml_config::gen_serialize_deserialize_test!(BenchConfig);
}
|
||||
127
crates/udp_bench/src/main.rs
Normal file
127
crates/udp_bench/src/main.rs
Normal file
|
|
@ -0,0 +1,127 @@
|
|||
//! Benchmark announce and scrape handlers
|
||||
//!
|
||||
//! Example outputs:
|
||||
//! ```
|
||||
//! # Results over 10 rounds with 2 threads
|
||||
//! Announce: 429 540 requests/second, 2328.07 ns/request
|
||||
//! Scrape: 1 873 545 requests/second, 533.75 ns/request
|
||||
//! ```
|
||||
|
||||
use aquatic_common::{PanicSentinelWatcher, ServerStartInstant};
|
||||
use aquatic_udp::workers::swarm::run_swarm_worker;
|
||||
use crossbeam_channel::unbounded;
|
||||
use num_format::{Locale, ToFormattedString};
|
||||
use rand::{rngs::SmallRng, thread_rng, Rng, SeedableRng};
|
||||
use std::time::Duration;
|
||||
|
||||
use aquatic_common::cli::run_app_with_cli_and_config;
|
||||
use aquatic_udp::common::*;
|
||||
use aquatic_udp::config::Config;
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use config::BenchConfig;
|
||||
|
||||
mod announce;
|
||||
mod common;
|
||||
mod config;
|
||||
mod scrape;
|
||||
|
||||
#[global_allocator]
|
||||
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn main() {
|
||||
run_app_with_cli_and_config::<BenchConfig>(
|
||||
"aquatic_udp_bench: Run aquatic_udp benchmarks",
|
||||
env!("CARGO_PKG_VERSION"),
|
||||
run,
|
||||
None,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn run(bench_config: BenchConfig) -> ::anyhow::Result<()> {
|
||||
// Setup common state, spawn request handlers
|
||||
|
||||
let mut aquatic_config = Config::default();
|
||||
let (_, sentinel) = PanicSentinelWatcher::create_with_sentinel();
|
||||
|
||||
aquatic_config.cleaning.torrent_cleaning_interval = 60 * 60 * 24;
|
||||
|
||||
let (request_sender, request_receiver) = unbounded();
|
||||
let (response_sender, response_receiver) = unbounded();
|
||||
|
||||
let response_sender = ConnectedResponseSender::new(vec![response_sender]);
|
||||
let (statistics_sender, _) = unbounded();
|
||||
|
||||
let server_start_instant = ServerStartInstant::new();
|
||||
|
||||
{
|
||||
let config = aquatic_config.clone();
|
||||
let state = State::new(config.swarm_workers);
|
||||
|
||||
::std::thread::spawn(move || {
|
||||
run_swarm_worker(
|
||||
sentinel,
|
||||
config,
|
||||
state,
|
||||
server_start_instant,
|
||||
request_receiver,
|
||||
response_sender,
|
||||
statistics_sender,
|
||||
SwarmWorkerIndex(0),
|
||||
)
|
||||
});
|
||||
}
|
||||
|
||||
// Run benchmarks
|
||||
|
||||
let mut rng = SmallRng::from_rng(thread_rng()).unwrap();
|
||||
let info_hashes = create_info_hashes(&mut rng);
|
||||
|
||||
let a = announce::bench_announce_handler(
|
||||
&bench_config,
|
||||
&request_sender,
|
||||
&response_receiver,
|
||||
&mut rng,
|
||||
&info_hashes,
|
||||
);
|
||||
|
||||
let s = scrape::bench_scrape_handler(
|
||||
&bench_config,
|
||||
&request_sender,
|
||||
&response_receiver,
|
||||
&mut rng,
|
||||
&info_hashes,
|
||||
);
|
||||
|
||||
println!(
|
||||
"\n# Results over {} rounds with {} threads",
|
||||
bench_config.num_rounds, bench_config.num_threads,
|
||||
);
|
||||
|
||||
print_results("Announce:", a.0, a.1);
|
||||
print_results("Scrape: ", s.0, s.1);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn print_results(request_type: &str, num_responses: usize, duration: Duration) {
|
||||
let per_second = ((num_responses as f64 / (duration.as_micros() as f64 / 1000000.0)) as usize)
|
||||
.to_formatted_string(&Locale::se);
|
||||
|
||||
let time_per_request = duration.as_nanos() as f64 / (num_responses as f64);
|
||||
|
||||
println!(
|
||||
"{} {:>10} requests/second, {:>8.2} ns/request",
|
||||
request_type, per_second, time_per_request,
|
||||
);
|
||||
}
|
||||
|
||||
fn create_info_hashes(rng: &mut impl Rng) -> Vec<InfoHash> {
|
||||
let mut info_hashes = Vec::new();
|
||||
|
||||
for _ in 0..common::NUM_INFO_HASHES {
|
||||
info_hashes.push(InfoHash(rng.gen()));
|
||||
}
|
||||
|
||||
info_hashes
|
||||
}
|
||||
123
crates/udp_bench/src/scrape.rs
Normal file
123
crates/udp_bench/src/scrape.rs
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use aquatic_common::CanonicalSocketAddr;
|
||||
use crossbeam_channel::{Receiver, Sender};
|
||||
use indicatif::ProgressIterator;
|
||||
use rand::Rng;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
use aquatic_udp::common::*;
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::BenchConfig;
|
||||
|
||||
pub fn bench_scrape_handler(
|
||||
bench_config: &BenchConfig,
|
||||
request_sender: &Sender<(SocketWorkerIndex, ConnectedRequest, CanonicalSocketAddr)>,
|
||||
response_receiver: &Receiver<(ConnectedResponse, CanonicalSocketAddr)>,
|
||||
rng: &mut impl Rng,
|
||||
info_hashes: &[InfoHash],
|
||||
) -> (usize, Duration) {
|
||||
let requests = create_requests(
|
||||
rng,
|
||||
info_hashes,
|
||||
bench_config.num_scrape_requests,
|
||||
bench_config.num_hashes_per_scrape_request,
|
||||
);
|
||||
|
||||
let p = 10_000 * bench_config.num_threads; // FIXME: adjust to sharded workers
|
||||
let mut num_responses = 0usize;
|
||||
|
||||
let mut dummy: i32 = rng.gen();
|
||||
|
||||
let pb = create_progress_bar("Scrape", bench_config.num_rounds as u64);
|
||||
|
||||
// Start benchmark
|
||||
|
||||
let before = Instant::now();
|
||||
|
||||
for round in (0..bench_config.num_rounds).progress_with(pb) {
|
||||
for request_chunk in requests.chunks(p) {
|
||||
for (request, src) in request_chunk {
|
||||
let request = ConnectedRequest::Scrape(PendingScrapeRequest {
|
||||
slab_key: 0,
|
||||
info_hashes: request
|
||||
.info_hashes
|
||||
.clone()
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.collect(),
|
||||
});
|
||||
|
||||
request_sender
|
||||
.send((SocketWorkerIndex(0), request, *src))
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
while let Ok((ConnectedResponse::Scrape(response), _)) = response_receiver.try_recv() {
|
||||
num_responses += 1;
|
||||
|
||||
if let Some(stat) = response.torrent_stats.values().last() {
|
||||
dummy ^= stat.leechers.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let total = bench_config.num_scrape_requests * (round + 1);
|
||||
|
||||
while num_responses < total {
|
||||
if let Ok((ConnectedResponse::Scrape(response), _)) = response_receiver.recv() {
|
||||
num_responses += 1;
|
||||
|
||||
if let Some(stat) = response.torrent_stats.values().last() {
|
||||
dummy ^= stat.leechers.0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let elapsed = before.elapsed();
|
||||
|
||||
if dummy == 0 {
|
||||
println!("dummy dummy");
|
||||
}
|
||||
|
||||
(num_responses, elapsed)
|
||||
}
|
||||
|
||||
pub fn create_requests(
|
||||
rng: &mut impl Rng,
|
||||
info_hashes: &[InfoHash],
|
||||
number: usize,
|
||||
hashes_per_request: usize,
|
||||
) -> Vec<(ScrapeRequest, CanonicalSocketAddr)> {
|
||||
let gamma = Gamma::new(GAMMA_SHAPE, GAMMA_SCALE).unwrap();
|
||||
|
||||
let max_index = info_hashes.len() - 1;
|
||||
|
||||
let mut requests = Vec::new();
|
||||
|
||||
for _ in 0..number {
|
||||
let mut request_info_hashes = Vec::new();
|
||||
|
||||
for _ in 0..hashes_per_request {
|
||||
let info_hash_index = gamma_usize(rng, gamma, max_index);
|
||||
request_info_hashes.push(info_hashes[info_hash_index])
|
||||
}
|
||||
|
||||
let request = ScrapeRequest {
|
||||
connection_id: ConnectionId(0),
|
||||
transaction_id: TransactionId(rng.gen()),
|
||||
info_hashes: request_info_hashes,
|
||||
};
|
||||
|
||||
requests.push((
|
||||
request,
|
||||
CanonicalSocketAddr::new(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 1))),
|
||||
));
|
||||
}
|
||||
|
||||
requests
|
||||
}
|
||||
35
crates/udp_load_test/Cargo.toml
Normal file
35
crates/udp_load_test/Cargo.toml
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
[package]
|
||||
name = "aquatic_udp_load_test"
|
||||
description = "BitTorrent (UDP) load tester"
|
||||
keywords = ["udp", "benchmark", "peer-to-peer", "torrent", "bittorrent"]
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[features]
|
||||
cpu-pinning = ["aquatic_common/hwloc"]
|
||||
|
||||
[[bin]]
|
||||
name = "aquatic_udp_load_test"
|
||||
|
||||
[dependencies]
|
||||
aquatic_common.workspace = true
|
||||
aquatic_toml_config.workspace = true
|
||||
aquatic_udp_protocol.workspace = true
|
||||
|
||||
anyhow = "1"
|
||||
hashbrown = "0.14"
|
||||
mimalloc = { version = "0.1", default-features = false }
|
||||
mio = { version = "0.8", features = ["net", "os-poll"] }
|
||||
rand_distr = "0.4"
|
||||
rand = { version = "0.8", features = ["small_rng"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
socket2 = { version = "0.5", features = ["all"] }
|
||||
|
||||
[dev-dependencies]
|
||||
quickcheck = "1"
|
||||
quickcheck_macros = "1"
|
||||
49
crates/udp_load_test/src/common.rs
Normal file
49
crates/udp_load_test/src/common.rs
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
use std::sync::{atomic::AtomicUsize, Arc};
|
||||
|
||||
use hashbrown::HashMap;
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
// One simulated peer in the load test.
#[derive(PartialEq, Eq, Clone)]
pub struct TorrentPeer {
    pub info_hash: InfoHash,
    // Indices into the shared info hash list — presumably the hashes this
    // peer asks about when scraping; confirm against the worker code.
    pub scrape_hash_indeces: Vec<usize>,
    pub connection_id: ConnectionId,
    pub peer_id: PeerId,
    pub port: Port,
}

// Peers keyed by a transaction id — presumably that of each peer's most
// recent request; confirm in the worker code.
pub type TorrentPeerMap = HashMap<TransactionId, TorrentPeer>;

// Counters shared across workers via `Arc` (see `LoadTestState`), hence
// the atomics.
#[derive(Default)]
pub struct Statistics {
    pub requests: AtomicUsize,
    pub response_peers: AtomicUsize,
    pub responses_connect: AtomicUsize,
    pub responses_announce: AtomicUsize,
    pub responses_scrape: AtomicUsize,
    pub responses_error: AtomicUsize,
}

// State shared by all load-test workers; `Clone` is cheap (two Arc bumps).
#[derive(Clone)]
pub struct LoadTestState {
    pub info_hashes: Arc<Vec<InfoHash>>,
    pub statistics: Arc<Statistics>,
}

// Which kind of request a worker generates.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum RequestType {
    Announce,
    Connect,
    Scrape,
}

// Per-worker, non-atomic counters mirroring `Statistics` — presumably
// accumulated locally and flushed into the shared atomics periodically.
#[derive(Default)]
pub struct SocketWorkerLocalStatistics {
    pub requests: usize,
    pub response_peers: usize,
    pub responses_connect: usize,
    pub responses_announce: usize,
    pub responses_scrape: usize,
    pub responses_error: usize,
}
|
||||
136
crates/udp_load_test/src/config.rs
Normal file
136
crates/udp_load_test/src/config.rs
Normal file
|
|
@ -0,0 +1,136 @@
|
|||
use std::net::SocketAddr;
|
||||
|
||||
use serde::Deserialize;
|
||||
|
||||
use aquatic_common::cli::LogLevel;
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
use aquatic_common::cpu_pinning::desc::CpuPinningConfigDesc;
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
|
||||
// Top-level config: every field has a default (`serde(default)`) and unknown
// keys are rejected. Field `///` docs below are kept verbatim — they may be
// emitted into generated TOML by the TomlConfig derive.
/// aquatic_udp_load_test configuration
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    /// Server address
    ///
    /// If you want to send IPv4 requests to a IPv4+IPv6 tracker, put an IPv4
    /// address here.
    pub server_address: SocketAddr,
    pub log_level: LogLevel,
    /// Number of workers sending requests
    pub workers: u8,
    /// Run duration (quit and generate report after this many seconds)
    pub duration: usize,
    pub network: NetworkConfig,
    pub requests: RequestConfig,
    // Only present when the `cpu-pinning` cargo feature is enabled.
    #[cfg(feature = "cpu-pinning")]
    pub cpu_pinning: CpuPinningConfigDesc,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            server_address: "127.0.0.1:3000".parse().unwrap(),
            log_level: LogLevel::Error,
            workers: 1,
            // NOTE(review): 0 presumably means "run until interrupted" —
            // confirm against where `duration` is consumed.
            duration: 0,
            network: NetworkConfig::default(),
            requests: RequestConfig::default(),
            #[cfg(feature = "cpu-pinning")]
            cpu_pinning: Default::default(),
        }
    }
}
|
||||
|
||||
// Socket / polling tuning for the load-test client.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct NetworkConfig {
    /// True means bind to one localhost IP per socket.
    ///
    /// The point of multiple IPs is to cause a better distribution
    /// of requests to servers with SO_REUSEPORT option.
    ///
    /// Setting this to true can cause issues on macOS.
    pub multiple_client_ipv4s: bool,
    /// Number of first client port
    pub first_port: u16,
    /// Socket worker poll timeout in microseconds
    pub poll_timeout: u64,
    /// Socket worker polling event number
    pub poll_event_capacity: usize,
    /// Size of socket recv buffer. Use 0 for OS default.
    ///
    /// This setting can have a big impact on dropped packages. It might
    /// require changing system defaults. Some examples of commands to set
    /// values for different operating systems:
    ///
    /// macOS:
    /// $ sudo sysctl net.inet.udp.recvspace=6000000
    ///
    /// Linux:
    /// $ sudo sysctl -w net.core.rmem_max=104857600
    /// $ sudo sysctl -w net.core.rmem_default=104857600
    pub recv_buffer: usize,
}
|
||||
|
||||
impl Default for NetworkConfig {
    fn default() -> Self {
        Self {
            multiple_client_ipv4s: true,
            // Worker i binds first_port + i (see `run` in main.rs)
            first_port: 45_000,
            poll_timeout: 276,
            poll_event_capacity: 2_877,
            recv_buffer: 6_000_000,
        }
    }
}
|
||||
|
||||
// Controls what traffic the simulated peers generate.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct RequestConfig {
    /// Number of torrents to simulate
    pub number_of_torrents: usize,
    /// Maximum number of torrents to ask about in scrape requests
    pub scrape_max_torrents: usize,
    /// Probability that a generated request is a connect request as part
    /// of sum of the various weight arguments.
    pub weight_connect: usize,
    /// Probability that a generated request is a announce request, as part
    /// of sum of the various weight arguments.
    pub weight_announce: usize,
    /// Probability that a generated request is a scrape request, as part
    /// of sum of the various weight arguments.
    pub weight_scrape: usize,
    /// Peers choose torrents according to this Gamma distribution shape
    pub torrent_gamma_shape: f64,
    /// Peers choose torrents according to this Gamma distribution scale
    pub torrent_gamma_scale: f64,
    /// Probability that a generated peer is a seeder
    pub peer_seeder_probability: f64,
    /// Probability that an additional connect request will be sent for each
    /// mio event
    pub additional_request_probability: f32,
}
|
||||
|
||||
impl Default for RequestConfig {
    // Default mix: mostly announces, a trickle of scrapes, and connects only
    // when a peer needs a fresh connection id (weight_connect == 0).
    fn default() -> Self {
        Self {
            number_of_torrents: 10_000,
            scrape_max_torrents: 50,
            weight_connect: 0,
            weight_announce: 100,
            weight_scrape: 1,
            torrent_gamma_shape: 0.2,
            torrent_gamma_scale: 100.0,
            peer_seeder_probability: 0.25,
            additional_request_probability: 0.5,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::Config;

    // Round-trip test: serialize the default Config to TOML and parse it back.
    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}
|
||||
203
crates/udp_load_test/src/main.rs
Normal file
203
crates/udp_load_test/src/main.rs
Normal file
|
|
@ -0,0 +1,203 @@
|
|||
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::{atomic::Ordering, Arc};
|
||||
use std::thread::{self, Builder};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[cfg(feature = "cpu-pinning")]
|
||||
use aquatic_common::cpu_pinning::{pin_current_if_configured_to, WorkerIndex};
|
||||
use rand_distr::Gamma;
|
||||
|
||||
mod common;
|
||||
mod config;
|
||||
mod utils;
|
||||
mod worker;
|
||||
|
||||
use common::*;
|
||||
use config::Config;
|
||||
use utils::*;
|
||||
use worker::*;
|
||||
|
||||
#[global_allocator]
|
||||
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
pub fn main() {
    // Parse CLI arguments / optional TOML config file via the shared helper,
    // then hand control to `run`.
    aquatic_common::cli::run_app_with_cli_and_config::<Config>(
        "aquatic_udp_load_test: BitTorrent load tester",
        env!("CARGO_PKG_VERSION"),
        run,
        None,
    )
}
|
||||
|
||||
impl aquatic_common::cli::Config for Config {
    // Lets the shared CLI helper initialize logging at the configured level.
    fn get_log_level(&self) -> Option<aquatic_common::cli::LogLevel> {
        Some(self.log_level)
    }
}
|
||||
|
||||
/// Validate the config, spawn one socket worker per configured worker, and
/// run statistics monitoring on the main thread until the duration elapses.
fn run(config: Config) -> ::anyhow::Result<()> {
    // At least one request type must be possible to generate.
    if config.requests.weight_announce
        + config.requests.weight_connect
        + config.requests.weight_scrape
        == 0
    {
        panic!("Error: at least one weight must be larger than zero.");
    }

    println!("Starting client with config: {:#?}", config);

    // Pre-generate the full set of simulated torrents, shared by all workers.
    let mut info_hashes = Vec::with_capacity(config.requests.number_of_torrents);

    for _ in 0..config.requests.number_of_torrents {
        info_hashes.push(generate_info_hash());
    }

    let state = LoadTestState {
        info_hashes: Arc::new(info_hashes),
        statistics: Arc::new(Statistics::default()),
    };

    // Distribution used when peers pick which torrents to act on.
    let gamma = Gamma::new(
        config.requests.torrent_gamma_shape,
        config.requests.torrent_gamma_scale,
    )
    .unwrap();

    // Start workers

    for i in 0..config.workers {
        // Each worker gets its own source port; optionally its own loopback
        // IP too, to spread load across SO_REUSEPORT server sockets.
        let port = config.network.first_port + (i as u16);

        let ip = if config.server_address.is_ipv6() {
            Ipv6Addr::LOCALHOST.into()
        } else {
            if config.network.multiple_client_ipv4s {
                // 127.0.0.1, 127.0.0.2, ... — one address per worker
                Ipv4Addr::new(127, 0, 0, 1 + i).into()
            } else {
                Ipv4Addr::LOCALHOST.into()
            }
        };

        let addr = SocketAddr::new(ip, port);
        let config = config.clone();
        let state = state.clone();

        Builder::new().name("load-test".into()).spawn(move || {
            #[cfg(feature = "cpu-pinning")]
            pin_current_if_configured_to(
                &config.cpu_pinning,
                config.workers as usize,
                0,
                WorkerIndex::SocketWorker(i as usize),
            );

            run_worker_thread(state, gamma, &config, addr)
        })?;
    }

    #[cfg(feature = "cpu-pinning")]
    pin_current_if_configured_to(
        &config.cpu_pinning,
        config.workers as usize,
        0,
        WorkerIndex::Util,
    );

    // Main thread prints periodic statistics and the final report; this
    // blocks until the configured duration (if any) has elapsed.
    monitor_statistics(state, &config);

    Ok(())
}
|
||||
|
||||
fn monitor_statistics(state: LoadTestState, config: &Config) {
|
||||
let mut report_avg_connect: Vec<f64> = Vec::new();
|
||||
let mut report_avg_announce: Vec<f64> = Vec::new();
|
||||
let mut report_avg_scrape: Vec<f64> = Vec::new();
|
||||
let mut report_avg_error: Vec<f64> = Vec::new();
|
||||
|
||||
let interval = 5;
|
||||
|
||||
let start_time = Instant::now();
|
||||
let duration = Duration::from_secs(config.duration as u64);
|
||||
|
||||
let mut last = start_time;
|
||||
|
||||
let time_elapsed = loop {
|
||||
thread::sleep(Duration::from_secs(interval));
|
||||
|
||||
let requests = fetch_and_reset(&state.statistics.requests);
|
||||
let response_peers = fetch_and_reset(&state.statistics.response_peers);
|
||||
let responses_connect = fetch_and_reset(&state.statistics.responses_connect);
|
||||
let responses_announce = fetch_and_reset(&state.statistics.responses_announce);
|
||||
let responses_scrape = fetch_and_reset(&state.statistics.responses_scrape);
|
||||
let responses_error = fetch_and_reset(&state.statistics.responses_error);
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
let elapsed = (now - last).as_secs_f64();
|
||||
|
||||
last = now;
|
||||
|
||||
let peers_per_announce_response = response_peers / responses_announce;
|
||||
|
||||
let avg_requests = requests / elapsed;
|
||||
let avg_responses_connect = responses_connect / elapsed;
|
||||
let avg_responses_announce = responses_announce / elapsed;
|
||||
let avg_responses_scrape = responses_scrape / elapsed;
|
||||
let avg_responses_error = responses_error / elapsed;
|
||||
|
||||
let avg_responses = avg_responses_connect
|
||||
+ avg_responses_announce
|
||||
+ avg_responses_scrape
|
||||
+ avg_responses_error;
|
||||
|
||||
report_avg_connect.push(avg_responses_connect);
|
||||
report_avg_announce.push(avg_responses_announce);
|
||||
report_avg_scrape.push(avg_responses_scrape);
|
||||
report_avg_error.push(avg_responses_error);
|
||||
|
||||
println!();
|
||||
println!("Requests out: {:.2}/second", avg_requests);
|
||||
println!("Responses in: {:.2}/second", avg_responses);
|
||||
println!(" - Connect responses: {:.2}", avg_responses_connect);
|
||||
println!(" - Announce responses: {:.2}", avg_responses_announce);
|
||||
println!(" - Scrape responses: {:.2}", avg_responses_scrape);
|
||||
println!(" - Error responses: {:.2}", avg_responses_error);
|
||||
println!(
|
||||
"Peers per announce response: {:.2}",
|
||||
peers_per_announce_response
|
||||
);
|
||||
|
||||
let time_elapsed = start_time.elapsed();
|
||||
|
||||
if config.duration != 0 && time_elapsed >= duration {
|
||||
break time_elapsed;
|
||||
}
|
||||
};
|
||||
|
||||
let len = report_avg_connect.len() as f64;
|
||||
|
||||
let avg_connect: f64 = report_avg_connect.into_iter().sum::<f64>() / len;
|
||||
let avg_announce: f64 = report_avg_announce.into_iter().sum::<f64>() / len;
|
||||
let avg_scrape: f64 = report_avg_scrape.into_iter().sum::<f64>() / len;
|
||||
let avg_error: f64 = report_avg_error.into_iter().sum::<f64>() / len;
|
||||
|
||||
let avg_total = avg_connect + avg_announce + avg_scrape + avg_error;
|
||||
|
||||
println!();
|
||||
println!("# aquatic load test report");
|
||||
println!();
|
||||
println!("Test ran for {} seconds", time_elapsed.as_secs());
|
||||
println!("Average responses per second: {:.2}", avg_total);
|
||||
println!(" - Connect responses: {:.2}", avg_connect);
|
||||
println!(" - Announce responses: {:.2}", avg_announce);
|
||||
println!(" - Scrape responses: {:.2}", avg_scrape);
|
||||
println!(" - Error responses: {:.2}", avg_error);
|
||||
println!();
|
||||
println!("Config: {:#?}", config);
|
||||
println!();
|
||||
}
|
||||
|
||||
/// Atomically read the counter and reset it to zero, returning the previous
/// value as f64 (ready for per-second averaging).
fn fetch_and_reset(atomic_usize: &AtomicUsize) -> f64 {
    // `swap` states the read-and-reset intent directly; the previous
    // `fetch_and(0, ..)` had the same effect but obscured it.
    atomic_usize.swap(0, Ordering::Relaxed) as f64
}
|
||||
36
crates/udp_load_test/src/utils.rs
Normal file
36
crates/udp_load_test/src/utils.rs
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
use rand::prelude::*;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
/// Map a sample from `gamma` to an index in `0..=max`.
///
/// The sample is clamped to 101.0 and rescaled into roughly 0.0..=1.0, then
/// multiplied by `max`. Samples below 1.0 yield a negative fraction, which
/// the float-to-usize `as` cast saturates to 0.
pub fn gamma_usize(rng: &mut impl Rng, gamma: Gamma<f64>, max: usize) -> usize {
    let p: f64 = rng.sample(gamma);
    let p = (p.min(101.0f64) - 1.0) / 100.0;

    (p * max as f64) as usize
}
|
||||
|
||||
/// Random 20-byte peer id.
pub fn generate_peer_id() -> PeerId {
    PeerId(random_20_bytes())
}
|
||||
|
||||
/// Random 20-byte info hash for a simulated torrent.
pub fn generate_info_hash() -> InfoHash {
    InfoHash(random_20_bytes())
}
|
||||
|
||||
/// Random transaction id, used to match responses to in-flight requests.
pub fn generate_transaction_id(rng: &mut impl Rng) -> TransactionId {
    TransactionId(rng.gen())
}
|
||||
|
||||
/// Build a connect request — the first step of every request cycle.
pub fn create_connect_request(transaction_id: TransactionId) -> Request {
    (ConnectRequest { transaction_id }).into()
}
|
||||
|
||||
/// 20 random bytes (for peer ids and info hashes) from the thread-local RNG.
// Don't use SmallRng here for now
fn random_20_bytes() -> [u8; 20] {
    let mut bytes = [0; 20];

    thread_rng().fill_bytes(&mut bytes[..]);

    bytes
}
|
||||
205
crates/udp_load_test/src/worker/mod.rs
Normal file
205
crates/udp_load_test/src/worker/mod.rs
Normal file
|
|
@ -0,0 +1,205 @@
|
|||
mod request_gen;
|
||||
|
||||
use std::io::Cursor;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::Duration;
|
||||
|
||||
use mio::{net::UdpSocket, Events, Interest, Poll, Token};
|
||||
use rand::Rng;
|
||||
use rand::{prelude::SmallRng, thread_rng, SeedableRng};
|
||||
use rand_distr::Gamma;
|
||||
use socket2::{Domain, Protocol, Socket, Type};
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::{common::*, utils::*};
|
||||
use request_gen::process_response;
|
||||
|
||||
const MAX_PACKET_SIZE: usize = 8192;
|
||||
|
||||
/// Worker main loop: drive request/response cycles over a single nonblocking
/// UDP socket, polling with mio and tallying statistics per response type.
pub fn run_worker_thread(
    state: LoadTestState,
    gamma: Gamma<f64>,
    config: &Config,
    addr: SocketAddr,
) {
    let mut socket = UdpSocket::from_std(create_socket(config, addr));
    let mut buffer = [0u8; MAX_PACKET_SIZE];

    let mut rng = SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng()");
    // Maps outstanding transaction ids to the simulated peers awaiting replies
    let mut torrent_peers = TorrentPeerMap::default();

    let token = Token(0);
    let interests = Interest::READABLE;
    let timeout = Duration::from_micros(config.network.poll_timeout);

    let mut poll = Poll::new().expect("create poll");

    poll.registry()
        .register(&mut socket, token, interests)
        .unwrap();

    let mut events = Events::with_capacity(config.network.poll_event_capacity);

    let mut statistics = SocketWorkerLocalStatistics::default();

    // Bootstrap request cycle
    let initial_request = create_connect_request(generate_transaction_id(&mut thread_rng()));
    send_request(&mut socket, &mut buffer, &mut statistics, initial_request);

    loop {
        poll.poll(&mut events, Some(timeout))
            .expect("failed polling");

        for event in events.iter() {
            if (event.token() == token) & event.is_readable() {
                // Drain the socket: recv until it would block
                while let Ok(amt) = socket.recv(&mut buffer) {
                    match Response::from_bytes(&buffer[0..amt], addr.is_ipv4()) {
                        Ok(response) => {
                            // Tally the response by type
                            match response {
                                Response::AnnounceIpv4(ref r) => {
                                    statistics.responses_announce += 1;
                                    statistics.response_peers += r.peers.len();
                                }
                                Response::AnnounceIpv6(ref r) => {
                                    statistics.responses_announce += 1;
                                    statistics.response_peers += r.peers.len();
                                }
                                Response::Scrape(_) => {
                                    statistics.responses_scrape += 1;
                                }
                                Response::Connect(_) => {
                                    statistics.responses_connect += 1;
                                }
                                Response::Error(_) => {
                                    statistics.responses_error += 1;
                                }
                            }

                            // Each response may yield a follow-up request,
                            // keeping the peer's request cycle going
                            let opt_request = process_response(
                                &mut rng,
                                gamma,
                                &state.info_hashes,
                                &config,
                                &mut torrent_peers,
                                response,
                            );

                            if let Some(request) = opt_request {
                                send_request(&mut socket, &mut buffer, &mut statistics, request);
                            }
                        }
                        Err(err) => {
                            eprintln!("Received invalid response: {:#?}", err);
                        }
                    }
                }

                // Occasionally start an extra request cycle to grow load
                if rng.gen::<f32>() <= config.requests.additional_request_probability {
                    let additional_request =
                        create_connect_request(generate_transaction_id(&mut rng));

                    send_request(
                        &mut socket,
                        &mut buffer,
                        &mut statistics,
                        additional_request,
                    );
                }

                // Flush local counters into the shared statistics
                update_shared_statistics(&state, &mut statistics);
            }
        }
    }
}
|
||||
|
||||
fn send_request(
|
||||
socket: &mut UdpSocket,
|
||||
buffer: &mut [u8],
|
||||
statistics: &mut SocketWorkerLocalStatistics,
|
||||
request: Request,
|
||||
) {
|
||||
let mut cursor = Cursor::new(buffer);
|
||||
|
||||
match request.write(&mut cursor) {
|
||||
Ok(()) => {
|
||||
let position = cursor.position() as usize;
|
||||
let inner = cursor.get_ref();
|
||||
|
||||
match socket.send(&inner[..position]) {
|
||||
Ok(_) => {
|
||||
statistics.requests += 1;
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("Couldn't send packet: {:?}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("request_to_bytes err: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_shared_statistics(state: &LoadTestState, statistics: &mut SocketWorkerLocalStatistics) {
|
||||
state
|
||||
.statistics
|
||||
.requests
|
||||
.fetch_add(statistics.requests, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.responses_connect
|
||||
.fetch_add(statistics.responses_connect, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.responses_announce
|
||||
.fetch_add(statistics.responses_announce, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.responses_scrape
|
||||
.fetch_add(statistics.responses_scrape, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.responses_error
|
||||
.fetch_add(statistics.responses_error, Ordering::Relaxed);
|
||||
state
|
||||
.statistics
|
||||
.response_peers
|
||||
.fetch_add(statistics.response_peers, Ordering::Relaxed);
|
||||
|
||||
*statistics = SocketWorkerLocalStatistics::default();
|
||||
}
|
||||
|
||||
/// Create a nonblocking UDP socket bound to `addr` and connected to the
/// configured server address, with an optionally enlarged receive buffer.
fn create_socket(config: &Config, addr: SocketAddr) -> ::std::net::UdpSocket {
    let socket = if addr.is_ipv4() {
        Socket::new(Domain::IPV4, Type::DGRAM, Some(Protocol::UDP))
    } else {
        Socket::new(Domain::IPV6, Type::DGRAM, Some(Protocol::UDP))
    }
    .expect("create socket");

    socket
        .set_nonblocking(true)
        .expect("socket: set nonblocking");

    // recv_buffer == 0 means keep the OS default (see NetworkConfig docs)
    if config.network.recv_buffer != 0 {
        // Non-fatal: the test still runs, likely with more dropped packets
        if let Err(err) = socket.set_recv_buffer_size(config.network.recv_buffer) {
            eprintln!(
                "socket: failed setting recv buffer to {}: {:?}",
                config.network.recv_buffer, err
            );
        }
    }

    socket
        .bind(&addr.into())
        .unwrap_or_else(|err| panic!("socket: bind to {}: {:?}", addr, err));

    // Connecting lets the worker use send()/recv() with the server only
    socket
        .connect(&config.server_address.into())
        .expect("socket: connect to server");

    socket.into()
}
|
||||
218
crates/udp_load_test/src/worker/request_gen.rs
Normal file
218
crates/udp_load_test/src/worker/request_gen.rs
Normal file
|
|
@ -0,0 +1,218 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use rand::distributions::WeightedIndex;
|
||||
use rand::prelude::*;
|
||||
use rand_distr::Gamma;
|
||||
|
||||
use aquatic_udp_protocol::*;
|
||||
|
||||
use crate::common::*;
|
||||
use crate::config::Config;
|
||||
use crate::utils::*;
|
||||
|
||||
/// Inspect a response and, when a simulated peer is waiting on its
/// transaction id, produce the next request of that peer's cycle.
pub fn process_response(
    rng: &mut impl Rng,
    gamma: Gamma<f64>,
    info_hashes: &Arc<Vec<InfoHash>>,
    config: &Config,
    torrent_peers: &mut TorrentPeerMap,
    response: Response,
) -> Option<Request> {
    match response {
        Response::Connect(r) => {
            // Fetch the torrent peer or create it if it doesn't exist. Update
            // the connection id if fetched. Create a request and move the
            // torrent peer appropriately.

            let torrent_peer = torrent_peers
                .remove(&r.transaction_id)
                .map(|mut torrent_peer| {
                    torrent_peer.connection_id = r.connection_id;

                    torrent_peer
                })
                .unwrap_or_else(|| {
                    create_torrent_peer(config, rng, gamma, info_hashes, r.connection_id)
                });

            // Re-key the peer under a fresh transaction id for its next request
            let new_transaction_id = generate_transaction_id(rng);

            let request =
                create_random_request(config, rng, info_hashes, new_transaction_id, &torrent_peer);

            torrent_peers.insert(new_transaction_id, torrent_peer);

            Some(request)
        }
        Response::AnnounceIpv4(r) => if_torrent_peer_move_and_create_random_request(
            config,
            rng,
            info_hashes,
            torrent_peers,
            r.transaction_id,
        ),
        Response::AnnounceIpv6(r) => if_torrent_peer_move_and_create_random_request(
            config,
            rng,
            info_hashes,
            torrent_peers,
            r.transaction_id,
        ),
        Response::Scrape(r) => if_torrent_peer_move_and_create_random_request(
            config,
            rng,
            info_hashes,
            torrent_peers,
            r.transaction_id,
        ),
        Response::Error(r) => {
            // Connection-related errors are expected; anything else is surfaced
            if !r.message.to_lowercase().contains("connection") {
                eprintln!(
                    "Received error response which didn't contain the word 'connection': {}",
                    r.message
                );
            }

            // Restart the cycle with a fresh connect request, keeping the
            // waiting peer (re-keyed) if there was one
            if let Some(torrent_peer) = torrent_peers.remove(&r.transaction_id) {
                let new_transaction_id = generate_transaction_id(rng);

                torrent_peers.insert(new_transaction_id, torrent_peer);

                Some(create_connect_request(new_transaction_id))
            } else {
                Some(create_connect_request(generate_transaction_id(rng)))
            }
        }
    }
}
|
||||
|
||||
fn if_torrent_peer_move_and_create_random_request(
|
||||
config: &Config,
|
||||
rng: &mut impl Rng,
|
||||
info_hashes: &Arc<Vec<InfoHash>>,
|
||||
torrent_peers: &mut TorrentPeerMap,
|
||||
transaction_id: TransactionId,
|
||||
) -> Option<Request> {
|
||||
if let Some(torrent_peer) = torrent_peers.remove(&transaction_id) {
|
||||
let new_transaction_id = generate_transaction_id(rng);
|
||||
|
||||
let request =
|
||||
create_random_request(config, rng, info_hashes, new_transaction_id, &torrent_peer);
|
||||
|
||||
torrent_peers.insert(new_transaction_id, torrent_peer);
|
||||
|
||||
Some(request)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate a connect, announce or scrape request for `torrent_peer`, chosen
/// with probability proportional to the configured weights.
fn create_random_request(
    config: &Config,
    rng: &mut impl Rng,
    info_hashes: &Arc<Vec<InfoHash>>,
    transaction_id: TransactionId,
    torrent_peer: &TorrentPeer,
) -> Request {
    // Order here must match the `weights` array below
    const ITEMS: [RequestType; 3] = [
        RequestType::Announce,
        RequestType::Connect,
        RequestType::Scrape,
    ];

    let weights = [
        config.requests.weight_announce as u32,
        config.requests.weight_connect as u32,
        config.requests.weight_scrape as u32,
    ];

    // NOTE(review): this WeightedIndex is rebuilt on every call from config
    // values that never change; hoisting it to the caller would avoid the
    // repeated setup cost but requires an interface change — left as-is.
    let dist = WeightedIndex::new(weights).expect("random request weighted index");

    match ITEMS[dist.sample(rng)] {
        RequestType::Announce => create_announce_request(config, rng, torrent_peer, transaction_id),
        RequestType::Connect => create_connect_request(transaction_id),
        RequestType::Scrape => create_scrape_request(&info_hashes, torrent_peer, transaction_id),
    }
}
|
||||
|
||||
/// Build an announce request for `torrent_peer`, randomly choosing (per the
/// configured probability) whether the peer reports as seeder or leecher.
fn create_announce_request(
    config: &Config,
    rng: &mut impl Rng,
    torrent_peer: &TorrentPeer,
    transaction_id: TransactionId,
) -> Request {
    let (event, bytes_left) = {
        // Seeders report completion with nothing left; leechers report start
        // with bytes remaining
        if rng.gen_bool(config.requests.peer_seeder_probability) {
            (AnnounceEvent::Completed, NumberOfBytes(0))
        } else {
            (AnnounceEvent::Started, NumberOfBytes(50))
        }
    };

    (AnnounceRequest {
        connection_id: torrent_peer.connection_id,
        transaction_id,
        info_hash: torrent_peer.info_hash,
        peer_id: torrent_peer.peer_id,
        bytes_downloaded: NumberOfBytes(50),
        bytes_uploaded: NumberOfBytes(50),
        bytes_left,
        event,
        // None => the tracker should use the sender's address
        ip_address: None,
        key: PeerKey(12345),
        peers_wanted: NumberOfPeers(100),
        port: torrent_peer.port,
    })
    .into()
}
|
||||
|
||||
fn create_scrape_request(
|
||||
info_hashes: &Arc<Vec<InfoHash>>,
|
||||
torrent_peer: &TorrentPeer,
|
||||
transaction_id: TransactionId,
|
||||
) -> Request {
|
||||
let indeces = &torrent_peer.scrape_hash_indeces;
|
||||
|
||||
let mut scape_hashes = Vec::with_capacity(indeces.len());
|
||||
|
||||
for i in indeces {
|
||||
scape_hashes.push(info_hashes[*i].to_owned())
|
||||
}
|
||||
|
||||
(ScrapeRequest {
|
||||
connection_id: torrent_peer.connection_id,
|
||||
transaction_id,
|
||||
info_hashes: scape_hashes,
|
||||
})
|
||||
.into()
|
||||
}
|
||||
|
||||
fn create_torrent_peer(
|
||||
config: &Config,
|
||||
rng: &mut impl Rng,
|
||||
gamma: Gamma<f64>,
|
||||
info_hashes: &Arc<Vec<InfoHash>>,
|
||||
connection_id: ConnectionId,
|
||||
) -> TorrentPeer {
|
||||
let num_scape_hashes = rng.gen_range(1..config.requests.scrape_max_torrents);
|
||||
|
||||
let mut scrape_hash_indeces = Vec::new();
|
||||
|
||||
for _ in 0..num_scape_hashes {
|
||||
scrape_hash_indeces.push(select_info_hash_index(config, rng, gamma))
|
||||
}
|
||||
|
||||
let info_hash_index = select_info_hash_index(config, rng, gamma);
|
||||
|
||||
TorrentPeer {
|
||||
info_hash: info_hashes[info_hash_index],
|
||||
scrape_hash_indeces,
|
||||
connection_id,
|
||||
peer_id: generate_peer_id(),
|
||||
port: Port(rng.gen()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Pick a torrent index in `0..number_of_torrents` via the Gamma
/// distribution, skewing picks toward a subset of torrents.
fn select_info_hash_index(config: &Config, rng: &mut impl Rng, gamma: Gamma<f64>) -> usize {
    gamma_usize(rng, gamma, config.requests.number_of_torrents - 1)
}
|
||||
21
crates/udp_protocol/Cargo.toml
Normal file
21
crates/udp_protocol/Cargo.toml
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
[package]
|
||||
name = "aquatic_udp_protocol"
|
||||
description = "UDP BitTorrent tracker protocol"
|
||||
keywords = ["udp", "protocol", "peer-to-peer", "torrent", "bittorrent"]
|
||||
version.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
readme.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
aquatic_peer_id.workspace = true
|
||||
|
||||
byteorder = "1"
|
||||
either = "1"
|
||||
|
||||
[dev-dependencies]
|
||||
quickcheck = "1"
|
||||
quickcheck_macros = "1"
|
||||
65
crates/udp_protocol/src/common.rs
Normal file
65
crates/udp_protocol/src/common.rs
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
use std::fmt::Debug;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
|
||||
pub use aquatic_peer_id::{PeerClient, PeerId};
|
||||
|
||||
/// Marker trait for the two peer address types (IPv4 and IPv6).
pub trait Ip: Clone + Copy + Debug + PartialEq + Eq {}

impl Ip for Ipv4Addr {}
impl Ip for Ipv6Addr {}
|
||||
|
||||
// Newtype wrappers over the integer fields of the UDP tracker wire format;
// widths and signedness mirror the on-wire encoding (see request.rs).

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct AnnounceInterval(pub i32);

// 20-byte torrent identifier
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct InfoHash(pub [u8; 20]);

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct ConnectionId(pub i64);

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct TransactionId(pub i32);

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfBytes(pub i64);

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfPeers(pub i32);

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct NumberOfDownloads(pub i32);

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct Port(pub u16);

#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub struct PeerKey(pub u32);

/// A peer as returned in an announce response: address plus port, generic
/// over the address family.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ResponsePeer<I: Ip> {
    pub ip_address: I,
    pub port: Port,
}
|
||||
|
||||
#[cfg(test)]
impl quickcheck::Arbitrary for InfoHash {
    // Random 20-byte info hash for property tests
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        let mut bytes = [0u8; 20];

        for byte in bytes.iter_mut() {
            *byte = u8::arbitrary(g);
        }

        Self(bytes)
    }
}
|
||||
|
||||
#[cfg(test)]
impl<I: Ip + quickcheck::Arbitrary> quickcheck::Arbitrary for ResponsePeer<I> {
    // Random address + port for property tests
    fn arbitrary(g: &mut quickcheck::Gen) -> Self {
        Self {
            ip_address: quickcheck::Arbitrary::arbitrary(g),
            // `u16::arbitrary(g)` is already a u16; the previous `.into()`
            // was a no-op identity conversion (clippy: useless_conversion).
            port: Port(u16::arbitrary(g)),
        }
    }
}
|
||||
7
crates/udp_protocol/src/lib.rs
Normal file
7
crates/udp_protocol/src/lib.rs
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
//! UDP BitTorrent tracker protocol: wire types plus (de)serialization of
//! requests and responses.

pub mod common;
pub mod request;
pub mod response;

// Flat re-exports so users can `use aquatic_udp_protocol::*;`
pub use self::common::*;
pub use self::request::*;
pub use self::response::*;
|
||||
399
crates/udp_protocol/src/request.rs
Normal file
399
crates/udp_protocol/src/request.rs
Normal file
|
|
@ -0,0 +1,399 @@
|
|||
use std::convert::TryInto;
|
||||
use std::io::{self, Cursor, Read, Write};
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
|
||||
use either::Either;
|
||||
|
||||
use aquatic_peer_id::PeerId;
|
||||
|
||||
use super::common::*;
|
||||
|
||||
const PROTOCOL_IDENTIFIER: i64 = 4_497_486_125_440;
|
||||
|
||||
/// Event field of an announce request.
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum AnnounceEvent {
    Started,
    Stopped,
    Completed,
    None,
}

impl AnnounceEvent {
    /// Decode the wire integer; any value outside 1..=3 maps to `None`.
    #[inline]
    pub fn from_i32(wire: i32) -> Self {
        match wire {
            2 => Self::Started,
            3 => Self::Stopped,
            1 => Self::Completed,
            _ => Self::None,
        }
    }

    /// Encode this event as its wire integer.
    #[inline]
    pub fn to_i32(&self) -> i32 {
        match self {
            Self::None => 0,
            Self::Completed => 1,
            Self::Started => 2,
            Self::Stopped => 3,
        }
    }
}
|
||||
|
||||
/// First request of a session; its response supplies the connection id that
/// announce and scrape requests carry (see `Request::write`).
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ConnectRequest {
    pub transaction_id: TransactionId,
}

/// Report a peer's state for one torrent and request peers in return.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct AnnounceRequest {
    pub connection_id: ConnectionId,
    pub transaction_id: TransactionId,
    pub info_hash: InfoHash,
    pub peer_id: PeerId,
    pub bytes_downloaded: NumberOfBytes,
    pub bytes_uploaded: NumberOfBytes,
    pub bytes_left: NumberOfBytes,
    pub event: AnnounceEvent,
    // None is encoded as four zero bytes on the wire
    pub ip_address: Option<Ipv4Addr>,
    pub key: PeerKey,
    pub peers_wanted: NumberOfPeers,
    pub port: Port,
}

/// Request statistics for a batch of torrents.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ScrapeRequest {
    pub connection_id: ConnectionId,
    pub transaction_id: TransactionId,
    pub info_hashes: Vec<InfoHash>,
}
|
||||
|
||||
/// Error produced when parsing a request from bytes.
#[derive(Debug)]
pub enum RequestParseError {
    /// The connection and transaction ids were recovered, so an error
    /// response can be sent back to the requester.
    Sendable {
        connection_id: ConnectionId,
        transaction_id: TransactionId,
        err: Either<io::Error, &'static str>,
    },
    /// Not enough information to address a response; the request is dropped.
    Unsendable {
        err: Either<io::Error, &'static str>,
    },
}
|
||||
|
||||
impl RequestParseError {
    /// Wrap an I/O error together with the ids needed to answer the sender.
    pub fn sendable_io(err: io::Error, connection_id: i64, transaction_id: i32) -> Self {
        Self::Sendable {
            connection_id: ConnectionId(connection_id),
            transaction_id: TransactionId(transaction_id),
            err: Either::Left(err),
        }
    }
    /// Wrap a protocol error message together with the ids needed to answer
    /// the sender.
    pub fn sendable_text(text: &'static str, connection_id: i64, transaction_id: i32) -> Self {
        Self::Sendable {
            connection_id: ConnectionId(connection_id),
            transaction_id: TransactionId(transaction_id),
            err: Either::Right(text),
        }
    }
    /// Wrap an I/O error when no response can be addressed.
    pub fn unsendable_io(err: io::Error) -> Self {
        Self::Unsendable {
            err: Either::Left(err),
        }
    }
    /// Wrap a protocol error message when no response can be addressed.
    pub fn unsendable_text(text: &'static str) -> Self {
        Self::Unsendable {
            err: Either::Right(text),
        }
    }
}
|
||||
|
||||
/// A parsed UDP tracker request.
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Request {
    Connect(ConnectRequest),
    Announce(AnnounceRequest),
    Scrape(ScrapeRequest),
}

// Allow the concrete request types to be converted with `.into()` wherever a
// `Request` is expected.
impl From<ConnectRequest> for Request {
    fn from(r: ConnectRequest) -> Self {
        Self::Connect(r)
    }
}

impl From<AnnounceRequest> for Request {
    fn from(r: AnnounceRequest) -> Self {
        Self::Announce(r)
    }
}

impl From<ScrapeRequest> for Request {
    fn from(r: ScrapeRequest) -> Self {
        Self::Scrape(r)
    }
}
|
||||
|
||||
impl Request {
    /// Serialize the request into the binary UDP tracker wire format
    /// (BEP 15); all multi-byte integers are written big-endian.
    ///
    /// # Errors
    ///
    /// Returns any error produced by the underlying writer.
    pub fn write(self, bytes: &mut impl Write) -> Result<(), io::Error> {
        match self {
            Request::Connect(r) => {
                // Connect requests carry the magic protocol identifier in
                // the connection id slot, followed by action = 0.
                bytes.write_i64::<NetworkEndian>(PROTOCOL_IDENTIFIER)?;
                bytes.write_i32::<NetworkEndian>(0)?;
                bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
            }

            Request::Announce(r) => {
                // Action = 1
                bytes.write_i64::<NetworkEndian>(r.connection_id.0)?;
                bytes.write_i32::<NetworkEndian>(1)?;
                bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;

                bytes.write_all(&r.info_hash.0)?;
                bytes.write_all(&r.peer_id.0)?;

                // Field order on the wire: downloaded, left, uploaded
                bytes.write_i64::<NetworkEndian>(r.bytes_downloaded.0)?;
                bytes.write_i64::<NetworkEndian>(r.bytes_left.0)?;
                bytes.write_i64::<NetworkEndian>(r.bytes_uploaded.0)?;

                bytes.write_i32::<NetworkEndian>(r.event.to_i32())?;

                // "No explicit ip address" is encoded as 0.0.0.0
                bytes.write_all(&r.ip_address.map_or([0; 4], |ip| ip.octets()))?;

                bytes.write_u32::<NetworkEndian>(r.key.0)?;
                bytes.write_i32::<NetworkEndian>(r.peers_wanted.0)?;
                bytes.write_u16::<NetworkEndian>(r.port.0)?;
            }

            Request::Scrape(r) => {
                // Action = 2, then the raw 20-byte info hashes back to back
                bytes.write_i64::<NetworkEndian>(r.connection_id.0)?;
                bytes.write_i32::<NetworkEndian>(2)?;
                bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;

                for info_hash in r.info_hashes {
                    bytes.write_all(&info_hash.0)?;
                }
            }
        }

        Ok(())
    }

    /// Parse a request from a received datagram.
    ///
    /// `max_scrape_torrents` caps how many info hashes are taken from a
    /// scrape request; any hashes beyond the cap (and any trailing bytes
    /// that don't form a full 20-byte hash) are silently ignored.
    ///
    /// # Errors
    ///
    /// Returns [`RequestParseError::Unsendable`] when the common header
    /// (connection id, action, transaction id) can't be read or the
    /// connect magic is wrong, and [`RequestParseError::Sendable`] for
    /// failures after the header has been parsed.
    pub fn from_bytes(bytes: &[u8], max_scrape_torrents: u8) -> Result<Self, RequestParseError> {
        let mut cursor = Cursor::new(bytes);

        // Common 16-byte header shared by all request types
        let connection_id = cursor
            .read_i64::<NetworkEndian>()
            .map_err(RequestParseError::unsendable_io)?;
        let action = cursor
            .read_i32::<NetworkEndian>()
            .map_err(RequestParseError::unsendable_io)?;
        let transaction_id = cursor
            .read_i32::<NetworkEndian>()
            .map_err(RequestParseError::unsendable_io)?;

        match action {
            // Connect
            0 => {
                // The connection id slot must contain the protocol magic
                if connection_id == PROTOCOL_IDENTIFIER {
                    Ok((ConnectRequest {
                        transaction_id: TransactionId(transaction_id),
                    })
                    .into())
                } else {
                    Err(RequestParseError::unsendable_text(
                        "Protocol identifier missing",
                    ))
                }
            }

            // Announce
            1 => {
                let mut info_hash = [0; 20];
                let mut peer_id = [0; 20];
                let mut ip = [0; 4];

                cursor.read_exact(&mut info_hash).map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;
                cursor.read_exact(&mut peer_id).map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;

                let bytes_downloaded = cursor.read_i64::<NetworkEndian>().map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;
                let bytes_left = cursor.read_i64::<NetworkEndian>().map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;
                let bytes_uploaded = cursor.read_i64::<NetworkEndian>().map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;
                let event = cursor.read_i32::<NetworkEndian>().map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;

                cursor.read_exact(&mut ip).map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;

                let key = cursor.read_u32::<NetworkEndian>().map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;
                let peers_wanted = cursor.read_i32::<NetworkEndian>().map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;
                let port = cursor.read_u16::<NetworkEndian>().map_err(|err| {
                    RequestParseError::sendable_io(err, connection_id, transaction_id)
                })?;

                // 0.0.0.0 means the sender did not specify an ip address
                let opt_ip = if ip == [0; 4] {
                    None
                } else {
                    Some(Ipv4Addr::from(ip))
                };

                Ok((AnnounceRequest {
                    connection_id: ConnectionId(connection_id),
                    transaction_id: TransactionId(transaction_id),
                    info_hash: InfoHash(info_hash),
                    peer_id: PeerId(peer_id),
                    bytes_downloaded: NumberOfBytes(bytes_downloaded),
                    bytes_uploaded: NumberOfBytes(bytes_uploaded),
                    bytes_left: NumberOfBytes(bytes_left),
                    event: AnnounceEvent::from_i32(event),
                    ip_address: opt_ip,
                    key: PeerKey(key),
                    peers_wanted: NumberOfPeers(peers_wanted),
                    port: Port(port),
                })
                .into())
            }

            // Scrape
            2 => {
                let position = cursor.position() as usize;
                let inner = cursor.into_inner();

                // Take at most max_scrape_torrents whole 20-byte hashes;
                // chunks_exact drops any incomplete trailing chunk.
                let info_hashes: Vec<InfoHash> = (&inner[position..])
                    .chunks_exact(20)
                    .take(max_scrape_torrents as usize)
                    .map(|chunk| InfoHash(chunk.try_into().unwrap()))
                    .collect();

                if info_hashes.is_empty() {
                    // Scraping every torrent on the tracker is rejected
                    Err(RequestParseError::sendable_text(
                        "Full scrapes are not allowed",
                        connection_id,
                        transaction_id,
                    ))
                } else {
                    Ok((ScrapeRequest {
                        connection_id: ConnectionId(connection_id),
                        transaction_id: TransactionId(transaction_id),
                        info_hashes,
                    })
                    .into())
                }
            }

            _ => Err(RequestParseError::sendable_text(
                "Invalid action",
                connection_id,
                transaction_id,
            )),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use quickcheck::TestResult;
    use quickcheck_macros::quickcheck;

    use super::*;

    impl quickcheck::Arbitrary for AnnounceEvent {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            // Map two random bits onto all four variants. Previously both
            // (false, false) and (true, false) produced `Started`, so
            // `Stopped` was never generated and thus never exercised by
            // the round-trip tests below.
            match (bool::arbitrary(g), bool::arbitrary(g)) {
                (false, false) => Self::Started,
                (true, false) => Self::Stopped,
                (false, true) => Self::Completed,
                (true, true) => Self::None,
            }
        }
    }

    impl quickcheck::Arbitrary for ConnectRequest {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            Self {
                transaction_id: TransactionId(i32::arbitrary(g)),
            }
        }
    }

    impl quickcheck::Arbitrary for AnnounceRequest {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            Self {
                connection_id: ConnectionId(i64::arbitrary(g)),
                transaction_id: TransactionId(i32::arbitrary(g)),
                info_hash: InfoHash::arbitrary(g),
                peer_id: PeerId::arbitrary(g),
                bytes_downloaded: NumberOfBytes(i64::arbitrary(g)),
                bytes_uploaded: NumberOfBytes(i64::arbitrary(g)),
                bytes_left: NumberOfBytes(i64::arbitrary(g)),
                event: AnnounceEvent::arbitrary(g),
                // The wire format encodes "no ip address" as 0.0.0.0, so
                // an arbitrary address would not survive a round trip;
                // always use None here.
                ip_address: None,
                key: PeerKey(u32::arbitrary(g)),
                peers_wanted: NumberOfPeers(i32::arbitrary(g)),
                port: Port(u16::arbitrary(g)),
            }
        }
    }

    impl quickcheck::Arbitrary for ScrapeRequest {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            let info_hashes = (0..u8::arbitrary(g))
                .map(|_| InfoHash::arbitrary(g))
                .collect();

            Self {
                connection_id: ConnectionId(i64::arbitrary(g)),
                transaction_id: TransactionId(i32::arbitrary(g)),
                info_hashes,
            }
        }
    }

    /// Serialize `request` and parse it back, returning whether the
    /// result equals the original. Prints both values on mismatch.
    fn same_after_conversion(request: Request) -> bool {
        let mut buf = Vec::new();

        request.clone().write(&mut buf).unwrap();
        let r2 = Request::from_bytes(&buf[..], ::std::u8::MAX).unwrap();

        let success = request == r2;

        if !success {
            println!("before: {:#?}\nafter: {:#?}", request, r2);
        }

        success
    }

    #[quickcheck]
    fn test_connect_request_convert_identity(request: ConnectRequest) -> bool {
        same_after_conversion(request.into())
    }

    #[quickcheck]
    fn test_announce_request_convert_identity(request: AnnounceRequest) -> bool {
        same_after_conversion(request.into())
    }

    #[quickcheck]
    fn test_scrape_request_convert_identity(request: ScrapeRequest) -> TestResult {
        // An empty scrape request is rejected by from_bytes ("full scrapes
        // are not allowed"), so it can never round-trip; discard it.
        if request.info_hashes.is_empty() {
            return TestResult::discard();
        }

        TestResult::from_bool(same_after_conversion(request.into()))
    }
}
|
||||
356
crates/udp_protocol/src/response.rs
Normal file
356
crates/udp_protocol/src/response.rs
Normal file
|
|
@ -0,0 +1,356 @@
|
|||
use std::borrow::Cow;
|
||||
use std::convert::TryInto;
|
||||
use std::io::{self, Cursor, Write};
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
|
||||
use byteorder::{NetworkEndian, ReadBytesExt, WriteBytesExt};
|
||||
|
||||
use super::common::*;
|
||||
|
||||
/// Per-torrent swarm statistics returned in a scrape response.
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub struct TorrentScrapeStatistics {
    /// Peers with the complete torrent
    pub seeders: NumberOfPeers,
    /// Times the download has completed
    pub completed: NumberOfDownloads,
    /// Peers still downloading
    pub leechers: NumberOfPeers,
}
|
||||
|
||||
/// Response to a connect request (action = 0), handing the client a
/// connection id to use in subsequent announce/scrape requests.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ConnectResponse {
    pub connection_id: ConnectionId,
    /// Echoed from the request so the client can match the response.
    pub transaction_id: TransactionId,
}
|
||||
|
||||
/// Response to an announce request (action = 1), generic over the peer
/// address family (IPv4 or IPv6).
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct AnnounceResponse<I: Ip> {
    pub transaction_id: TransactionId,
    /// Seconds the client should wait before announcing again
    pub announce_interval: AnnounceInterval,
    pub leechers: NumberOfPeers,
    pub seeders: NumberOfPeers,
    /// Peers the client may connect to
    pub peers: Vec<ResponsePeer<I>>,
}
|
||||
|
||||
/// Response to a scrape request (action = 2).
///
/// Statistics are positional: entry N corresponds to the N:th info hash
/// of the scrape request.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ScrapeResponse {
    pub transaction_id: TransactionId,
    pub torrent_stats: Vec<TorrentScrapeStatistics>,
}
|
||||
|
||||
/// Error response (action = 3) carrying a human-readable message.
#[derive(PartialEq, Eq, Clone, Debug)]
pub struct ErrorResponse {
    pub transaction_id: TransactionId,
    /// `Cow` lets static messages avoid allocation while still
    /// supporting owned strings parsed off the wire.
    pub message: Cow<'static, str>,
}
|
||||
|
||||
/// A parsed UDP tracker response (BEP 15).
///
/// Announce responses are split by address family because the wire
/// format is identical apart from the peer address width, so the parser
/// must be told which one to expect.
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Response {
    Connect(ConnectResponse),
    AnnounceIpv4(AnnounceResponse<Ipv4Addr>),
    AnnounceIpv6(AnnounceResponse<Ipv6Addr>),
    Scrape(ScrapeResponse),
    Error(ErrorResponse),
}
|
||||
|
||||
impl From<ConnectResponse> for Response {
|
||||
fn from(r: ConnectResponse) -> Self {
|
||||
Self::Connect(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<AnnounceResponse<Ipv4Addr>> for Response {
|
||||
fn from(r: AnnounceResponse<Ipv4Addr>) -> Self {
|
||||
Self::AnnounceIpv4(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<AnnounceResponse<Ipv6Addr>> for Response {
|
||||
fn from(r: AnnounceResponse<Ipv6Addr>) -> Self {
|
||||
Self::AnnounceIpv6(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ScrapeResponse> for Response {
|
||||
fn from(r: ScrapeResponse) -> Self {
|
||||
Self::Scrape(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ErrorResponse> for Response {
|
||||
fn from(r: ErrorResponse) -> Self {
|
||||
Self::Error(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl Response {
    /// Serialize the response into the binary UDP tracker wire format
    /// (BEP 15); all multi-byte integers are written big-endian.
    ///
    /// # Errors
    ///
    /// Returns any error produced by the underlying writer.
    #[inline]
    pub fn write(&self, bytes: &mut impl Write) -> Result<(), io::Error> {
        match self {
            Response::Connect(r) => {
                // Action = 0
                bytes.write_i32::<NetworkEndian>(0)?;
                bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
                bytes.write_i64::<NetworkEndian>(r.connection_id.0)?;
            }
            Response::AnnounceIpv4(r) => {
                // Action = 1; peers encoded as 4-byte address + 2-byte port
                bytes.write_i32::<NetworkEndian>(1)?;
                bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
                bytes.write_i32::<NetworkEndian>(r.announce_interval.0)?;
                bytes.write_i32::<NetworkEndian>(r.leechers.0)?;
                bytes.write_i32::<NetworkEndian>(r.seeders.0)?;

                for peer in r.peers.iter() {
                    bytes.write_all(&peer.ip_address.octets())?;
                    bytes.write_u16::<NetworkEndian>(peer.port.0)?;
                }
            }
            Response::AnnounceIpv6(r) => {
                // Same action code as IPv4; only the address width (16
                // bytes) differs, so the reader must know the family.
                bytes.write_i32::<NetworkEndian>(1)?;
                bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;
                bytes.write_i32::<NetworkEndian>(r.announce_interval.0)?;
                bytes.write_i32::<NetworkEndian>(r.leechers.0)?;
                bytes.write_i32::<NetworkEndian>(r.seeders.0)?;

                for peer in r.peers.iter() {
                    bytes.write_all(&peer.ip_address.octets())?;
                    bytes.write_u16::<NetworkEndian>(peer.port.0)?;
                }
            }
            Response::Scrape(r) => {
                // Action = 2; 12 bytes of statistics per torrent
                bytes.write_i32::<NetworkEndian>(2)?;
                bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;

                for torrent_stat in r.torrent_stats.iter() {
                    bytes.write_i32::<NetworkEndian>(torrent_stat.seeders.0)?;
                    bytes.write_i32::<NetworkEndian>(torrent_stat.completed.0)?;
                    bytes.write_i32::<NetworkEndian>(torrent_stat.leechers.0)?;
                }
            }
            Response::Error(r) => {
                // Action = 3; message is written raw (no length prefix,
                // runs to the end of the datagram)
                bytes.write_i32::<NetworkEndian>(3)?;
                bytes.write_i32::<NetworkEndian>(r.transaction_id.0)?;

                bytes.write_all(r.message.as_bytes())?;
            }
        }

        Ok(())
    }

    /// Parse a response from a received datagram.
    ///
    /// `ipv4` selects the expected peer address family for announce
    /// responses, since the wire format does not distinguish them.
    /// An unknown action code yields an `ErrorResponse` rather than an
    /// `Err`. Trailing bytes that don't form a whole peer/statistics
    /// entry are silently ignored.
    ///
    /// # Errors
    ///
    /// Returns an error only if the fixed-size header fields can't be
    /// read.
    #[inline]
    pub fn from_bytes(bytes: &[u8], ipv4: bool) -> Result<Self, io::Error> {
        let mut cursor = Cursor::new(bytes);

        let action = cursor.read_i32::<NetworkEndian>()?;
        let transaction_id = cursor.read_i32::<NetworkEndian>()?;

        match action {
            // Connect
            0 => {
                let connection_id = cursor.read_i64::<NetworkEndian>()?;

                Ok((ConnectResponse {
                    connection_id: ConnectionId(connection_id),
                    transaction_id: TransactionId(transaction_id),
                })
                .into())
            }
            // Announce
            1 if ipv4 => {
                let announce_interval = cursor.read_i32::<NetworkEndian>()?;
                let leechers = cursor.read_i32::<NetworkEndian>()?;
                let seeders = cursor.read_i32::<NetworkEndian>()?;

                let position = cursor.position() as usize;
                let inner = cursor.into_inner();

                // 6 bytes per peer: 4-byte IPv4 address + 2-byte port
                let peers = inner[position..]
                    .chunks_exact(6)
                    .map(|chunk| {
                        let ip_bytes: [u8; 4] = (&chunk[..4]).try_into().unwrap();
                        let ip_address = Ipv4Addr::from(ip_bytes);
                        let port = (&chunk[4..]).read_u16::<NetworkEndian>().unwrap();

                        ResponsePeer {
                            ip_address,
                            port: Port(port),
                        }
                    })
                    .collect();

                Ok((AnnounceResponse {
                    transaction_id: TransactionId(transaction_id),
                    announce_interval: AnnounceInterval(announce_interval),
                    leechers: NumberOfPeers(leechers),
                    seeders: NumberOfPeers(seeders),
                    peers,
                })
                .into())
            }
            1 if !ipv4 => {
                let announce_interval = cursor.read_i32::<NetworkEndian>()?;
                let leechers = cursor.read_i32::<NetworkEndian>()?;
                let seeders = cursor.read_i32::<NetworkEndian>()?;

                let position = cursor.position() as usize;
                let inner = cursor.into_inner();

                // 18 bytes per peer: 16-byte IPv6 address + 2-byte port
                let peers = inner[position..]
                    .chunks_exact(18)
                    .map(|chunk| {
                        let ip_bytes: [u8; 16] = (&chunk[..16]).try_into().unwrap();
                        let ip_address = Ipv6Addr::from(ip_bytes);
                        let port = (&chunk[16..]).read_u16::<NetworkEndian>().unwrap();

                        ResponsePeer {
                            ip_address,
                            port: Port(port),
                        }
                    })
                    .collect();

                Ok((AnnounceResponse {
                    transaction_id: TransactionId(transaction_id),
                    announce_interval: AnnounceInterval(announce_interval),
                    leechers: NumberOfPeers(leechers),
                    seeders: NumberOfPeers(seeders),
                    peers,
                })
                .into())
            }
            // Scrape
            2 => {
                let position = cursor.position() as usize;
                let inner = cursor.into_inner();

                // 12 bytes per torrent: seeders, completed, leechers
                let stats = inner[position..]
                    .chunks_exact(12)
                    .map(|chunk| {
                        let mut cursor: Cursor<&[u8]> = Cursor::new(&chunk[..]);

                        let seeders = cursor.read_i32::<NetworkEndian>().unwrap();
                        let downloads = cursor.read_i32::<NetworkEndian>().unwrap();
                        let leechers = cursor.read_i32::<NetworkEndian>().unwrap();

                        TorrentScrapeStatistics {
                            seeders: NumberOfPeers(seeders),
                            completed: NumberOfDownloads(downloads),
                            leechers: NumberOfPeers(leechers),
                        }
                    })
                    .collect();

                Ok((ScrapeResponse {
                    transaction_id: TransactionId(transaction_id),
                    torrent_stats: stats,
                })
                .into())
            }
            // Error
            3 => {
                let position = cursor.position() as usize;
                let inner = cursor.into_inner();

                // The message is the remainder of the datagram; invalid
                // UTF-8 is replaced rather than rejected
                Ok((ErrorResponse {
                    transaction_id: TransactionId(transaction_id),
                    message: String::from_utf8_lossy(&inner[position..])
                        .into_owned()
                        .into(),
                })
                .into())
            }
            _ => Ok((ErrorResponse {
                transaction_id: TransactionId(transaction_id),
                message: "Invalid action".into(),
            })
            .into()),
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use quickcheck_macros::quickcheck;

    use super::*;

    impl quickcheck::Arbitrary for TorrentScrapeStatistics {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            Self {
                seeders: NumberOfPeers(i32::arbitrary(g)),
                completed: NumberOfDownloads(i32::arbitrary(g)),
                leechers: NumberOfPeers(i32::arbitrary(g)),
            }
        }
    }

    impl quickcheck::Arbitrary for ConnectResponse {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            Self {
                connection_id: ConnectionId(i64::arbitrary(g)),
                transaction_id: TransactionId(i32::arbitrary(g)),
            }
        }
    }

    impl<I: Ip + quickcheck::Arbitrary> quickcheck::Arbitrary for AnnounceResponse<I> {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            let peers = (0..u8::arbitrary(g))
                .map(|_| ResponsePeer::arbitrary(g))
                .collect();

            Self {
                transaction_id: TransactionId(i32::arbitrary(g)),
                announce_interval: AnnounceInterval(i32::arbitrary(g)),
                leechers: NumberOfPeers(i32::arbitrary(g)),
                seeders: NumberOfPeers(i32::arbitrary(g)),
                peers,
            }
        }
    }

    impl quickcheck::Arbitrary for ScrapeResponse {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            let torrent_stats = (0..u8::arbitrary(g))
                .map(|_| TorrentScrapeStatistics::arbitrary(g))
                .collect();

            Self {
                transaction_id: TransactionId(i32::arbitrary(g)),
                torrent_stats,
            }
        }
    }

    // Previously missing: error responses were never round-trip tested.
    // Arbitrary strings are valid UTF-8, so from_utf8_lossy in from_bytes
    // is lossless and the round trip must be exact.
    impl quickcheck::Arbitrary for ErrorResponse {
        fn arbitrary(g: &mut quickcheck::Gen) -> Self {
            Self {
                transaction_id: TransactionId(i32::arbitrary(g)),
                message: String::arbitrary(g).into(),
            }
        }
    }

    /// Serialize `response` and parse it back (as IPv4 or IPv6 per the
    /// flag), returning whether the result equals the original.
    fn same_after_conversion(response: Response, ipv4: bool) -> bool {
        let mut buf = Vec::new();

        response.clone().write(&mut buf).unwrap();
        let r2 = Response::from_bytes(&buf[..], ipv4).unwrap();

        let success = response == r2;

        if !success {
            println!("before: {:#?}\nafter: {:#?}", response, r2);
        }

        success
    }

    #[quickcheck]
    fn test_connect_response_convert_identity(response: ConnectResponse) -> bool {
        same_after_conversion(response.into(), true)
    }

    #[quickcheck]
    fn test_announce_response_ipv4_convert_identity(response: AnnounceResponse<Ipv4Addr>) -> bool {
        same_after_conversion(response.into(), true)
    }

    #[quickcheck]
    fn test_announce_response_ipv6_convert_identity(response: AnnounceResponse<Ipv6Addr>) -> bool {
        same_after_conversion(response.into(), false)
    }

    #[quickcheck]
    fn test_scrape_response_convert_identity(response: ScrapeResponse) -> bool {
        same_after_conversion(response.into(), true)
    }

    #[quickcheck]
    fn test_error_response_convert_identity(response: ErrorResponse) -> bool {
        same_after_conversion(response.into(), true)
    }
}
|
||||
56
crates/ws/Cargo.toml
Normal file
56
crates/ws/Cargo.toml
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
# Package manifest for aquatic_ws, the WebTorrent (WebSocket) tracker
# of the aquatic project.
[package]
name = "aquatic_ws"
description = "High-performance open WebTorrent tracker"
keywords = ["webtorrent", "websocket", "peer-to-peer", "torrent", "bittorrent"]
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
readme.workspace = true
# NOTE(review): sibling crates inherit this via `rust-version.workspace =
# true`; this crate pins 1.70 explicitly — confirm the override is
# intentional.
rust-version = "1.70"

[lib]
name = "aquatic_ws"

[[bin]]
name = "aquatic_ws"

[features]
default = ["prometheus"]
# Expose metrics over a Prometheus HTTP endpoint (implies "metrics")
prometheus = ["metrics", "metrics-exporter-prometheus"]
# Collect internal metrics
metrics = ["dep:metrics", "metrics-util"]

[dependencies]
aquatic_common = { workspace = true, features = ["rustls", "glommio"] }
aquatic_peer_id.workspace = true
aquatic_toml_config.workspace = true
aquatic_ws_protocol.workspace = true

anyhow = "1"
async-tungstenite = "0.23"
cfg-if = "1"
futures = "0.3"
futures-lite = "1"
futures-rustls = "0.24"
glommio = "0.8"
hashbrown = { version = "0.14", features = ["serde"] }
httparse = "1"
log = "0.4"
metrics = { version = "0.21", optional = true }
metrics-util = { version = "0.15", optional = true }
metrics-exporter-prometheus = { version = "0.12", optional = true, default-features = false, features = ["http-listener"] }
mimalloc = { version = "0.1", default-features = false }
privdrop = "0.5"
rand = { version = "0.8", features = ["small_rng"] }
rustls = "0.21"
rustls-pemfile = "1"
serde = { version = "1", features = ["derive"] }
signal-hook = { version = "0.3" }
slab = "0.4"
socket2 = { version = "0.5", features = ["all"] }
tungstenite = "0.20"

[dev-dependencies]
quickcheck = "1"
quickcheck_macros = "1"
|
||||
76
crates/ws/src/common.rs
Normal file
76
crates/ws/src/common.rs
Normal file
|
|
@ -0,0 +1,76 @@
|
|||
use std::{net::IpAddr, sync::Arc};
|
||||
|
||||
use aquatic_common::access_list::AccessListArcSwap;
|
||||
|
||||
pub use aquatic_common::ValidUntil;
|
||||
use aquatic_ws_protocol::{InfoHash, PeerId};
|
||||
|
||||
/// IP protocol version of a peer, with IPv4-mapped IPv6 addresses
/// treated as IPv4.
#[derive(Copy, Clone, Debug)]
pub enum IpVersion {
    V4,
    V6,
}

impl IpVersion {
    /// Classify an address, canonicalizing IPv4-mapped IPv6 addresses
    /// (`::ffff:a.b.c.d`) as [`IpVersion::V4`].
    pub fn canonical_from_ip(ip: IpAddr) -> IpVersion {
        match ip {
            IpAddr::V4(_) => IpVersion::V4,
            IpAddr::V6(addr) => {
                // An IPv4-mapped address has the 16-bit segment pattern
                // [0, 0, 0, 0, 0, 0xffff, _, _] (equivalent to the octet
                // pattern [0; 10] ++ [0xff, 0xff] ++ ipv4).
                if matches!(addr.segments(), [0, 0, 0, 0, 0, 0xffff, _, _]) {
                    IpVersion::V4
                } else {
                    IpVersion::V6
                }
            }
        }
    }
}
|
||||
|
||||
/// Shared application state. Cheap to clone: holds an `Arc` to the
/// swappable access list.
#[derive(Default, Clone)]
pub struct State {
    pub access_list: Arc<AccessListArcSwap>,
}
|
||||
|
||||
/// Identifies a pending scrape being assembled from partial responses.
#[derive(Copy, Clone, Debug)]
pub struct PendingScrapeId(pub u8);

/// Identifies a socket worker consuming out-messages.
#[derive(Copy, Clone, Debug)]
pub struct ConsumerId(pub u8);

/// Identifies a connection within a socket worker.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ConnectionId(pub usize);
|
||||
|
||||
/// Metadata attached to an incoming message.
#[derive(Clone, Copy, Debug)]
pub struct InMessageMeta {
    /// Index of socket worker responsible for this connection. Required for
    /// sending back response through correct channel to correct worker.
    pub out_message_consumer_id: ConsumerId,
    pub connection_id: ConnectionId,
    /// Address family of the sending peer
    pub ip_version: IpVersion,
    /// Set when this message is part of a scrape split across workers
    pub pending_scrape_id: Option<PendingScrapeId>,
}
|
||||
|
||||
/// Metadata attached to an outgoing message. Mirrors [`InMessageMeta`]
/// minus the ip version.
#[derive(Clone, Copy, Debug)]
pub struct OutMessageMeta {
    /// Index of socket worker responsible for this connection. Required for
    /// sending back response through correct channel to correct worker.
    pub out_message_consumer_id: ConsumerId,
    pub connection_id: ConnectionId,
    /// Set when this message is part of a scrape split across workers
    pub pending_scrape_id: Option<PendingScrapeId>,
}
|
||||
|
||||
/// Conversion from incoming-message metadata to outgoing-message
/// metadata, dropping the ip version field.
///
/// Implemented as `From` rather than a hand-written `Into`, per Rust
/// convention; the standard blanket impl still provides
/// `Into<OutMessageMeta> for InMessageMeta`, so existing `.into()` call
/// sites keep working unchanged.
impl From<InMessageMeta> for OutMessageMeta {
    fn from(meta: InMessageMeta) -> Self {
        Self {
            out_message_consumer_id: meta.out_message_consumer_id,
            connection_id: meta.connection_id,
            pending_scrape_id: meta.pending_scrape_id,
        }
    }
}
|
||||
|
||||
/// Control messages sent to swarm workers outside the normal
/// request/response flow.
#[derive(Clone, Copy, Debug)]
pub enum SwarmControlMessage {
    /// A peer's connection closed; its entry should be removed from the
    /// swarm it announced to.
    ConnectionClosed {
        info_hash: InfoHash,
        peer_id: PeerId,
        ip_version: IpVersion,
    },
}
|
||||
191
crates/ws/src/config.rs
Normal file
191
crates/ws/src/config.rs
Normal file
|
|
@ -0,0 +1,191 @@
|
|||
use std::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use aquatic_common::cpu_pinning::asc::CpuPinningConfigAsc;
|
||||
use aquatic_common::{access_list::AccessListConfig, privileges::PrivilegeConfig};
|
||||
use serde::Deserialize;
|
||||
|
||||
use aquatic_common::cli::LogLevel;
|
||||
use aquatic_toml_config::TomlConfig;
|
||||
|
||||
/// aquatic_ws configuration
///
/// Running behind a reverse proxy is supported, but IPv4 peer requests have
/// to be proxied to IPv4 requests, and IPv6 requests to IPv6 requests.
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct Config {
    /// Socket workers receive requests from the socket, parse them and send
    /// them on to the swarm workers. They then receive responses from the
    /// swarm workers, encode them and send them back over the socket.
    pub socket_workers: usize,
    /// Swarm workers receive a number of requests from socket workers,
    /// generate responses and send them back to the socket workers.
    pub swarm_workers: usize,
    pub log_level: LogLevel,
    pub network: NetworkConfig,
    pub protocol: ProtocolConfig,
    pub cleaning: CleaningConfig,
    pub privileges: PrivilegeConfig,
    pub access_list: AccessListConfig,
    #[cfg(feature = "metrics")]
    pub metrics: MetricsConfig,
    pub cpu_pinning: CpuPinningConfigAsc,
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
socket_workers: 1,
|
||||
swarm_workers: 1,
|
||||
log_level: LogLevel::default(),
|
||||
network: NetworkConfig::default(),
|
||||
protocol: ProtocolConfig::default(),
|
||||
cleaning: CleaningConfig::default(),
|
||||
privileges: PrivilegeConfig::default(),
|
||||
access_list: AccessListConfig::default(),
|
||||
#[cfg(feature = "metrics")]
|
||||
metrics: Default::default(),
|
||||
cpu_pinning: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl aquatic_common::cli::Config for Config {
    /// Expose the configured log level to the shared CLI runner.
    fn get_log_level(&self) -> Option<LogLevel> {
        Some(self.log_level)
    }
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct NetworkConfig {
    /// Bind to this address
    pub address: SocketAddr,
    /// Only allow access over IPv6
    pub only_ipv6: bool,
    /// Maximum number of pending TCP connections
    pub tcp_backlog: i32,

    /// Enable TLS
    pub enable_tls: bool,
    /// Path to TLS certificate (DER-encoded X.509)
    pub tls_certificate_path: PathBuf,
    /// Path to TLS private key (DER-encoded ASN.1 in PKCS#8 or PKCS#1 format)
    pub tls_private_key_path: PathBuf,

    /// Maximum size (bytes) of an accepted WebSocket message
    pub websocket_max_message_size: usize,
    /// Maximum size (bytes) of a single WebSocket frame
    pub websocket_max_frame_size: usize,
    /// WebSocket write buffer size (bytes)
    pub websocket_write_buffer_size: usize,

    /// Return a HTTP 200 Ok response when receiving GET /health. Can not be
    /// combined with enable_tls.
    pub enable_http_health_checks: bool,
}
|
||||
|
||||
impl Default for NetworkConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
address: SocketAddr::from(([0, 0, 0, 0], 3000)),
|
||||
only_ipv6: false,
|
||||
tcp_backlog: 1024,
|
||||
|
||||
enable_tls: false,
|
||||
tls_certificate_path: "".into(),
|
||||
tls_private_key_path: "".into(),
|
||||
|
||||
websocket_max_message_size: 64 * 1024,
|
||||
websocket_max_frame_size: 16 * 1024,
|
||||
websocket_write_buffer_size: 8 * 1024,
|
||||
|
||||
enable_http_health_checks: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct ProtocolConfig {
    /// Maximum number of torrents to accept in scrape request
    pub max_scrape_torrents: usize,
    /// Maximum number of offers to accept in announce request
    pub max_offers: usize,
    /// Ask peers to announce this often (seconds)
    pub peer_announce_interval: usize,
}
|
||||
|
||||
impl Default for ProtocolConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_scrape_torrents: 255,
|
||||
max_offers: 10,
|
||||
peer_announce_interval: 120,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct CleaningConfig {
    /// Clean peers this often (seconds)
    pub torrent_cleaning_interval: u64,
    /// Remove peers that have not announced for this long (seconds)
    pub max_peer_age: u32,
    /// Clean connections this often (seconds)
    pub connection_cleaning_interval: u64,
    /// Close connections if no responses have been sent to them for this long (seconds)
    pub max_connection_idle: u32,
}
|
||||
|
||||
impl Default for CleaningConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
torrent_cleaning_interval: 30,
|
||||
max_peer_age: 1800,
|
||||
max_connection_idle: 60 * 5,
|
||||
connection_cleaning_interval: 30,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "metrics")]
#[derive(Clone, Debug, PartialEq, TomlConfig, Deserialize)]
#[serde(default, deny_unknown_fields)]
pub struct MetricsConfig {
    /// Run a prometheus endpoint
    pub run_prometheus_endpoint: bool,
    /// Address to run prometheus endpoint on
    pub prometheus_endpoint_address: SocketAddr,
    /// Update metrics for torrent count this often (seconds)
    pub torrent_count_update_interval: u64,
    /// Serve information on peer clients
    ///
    /// Expect a certain CPU hit
    pub peer_clients: bool,
    /// Serve information on all peer id prefixes
    ///
    /// Requires `peer_clients` to be activated.
    ///
    /// Expect a certain CPU hit
    pub peer_id_prefixes: bool,
}
|
||||
|
||||
#[cfg(feature = "metrics")]
impl Default for MetricsConfig {
    /// Defaults: endpoint disabled but preconfigured for 0.0.0.0:9000,
    /// torrent counts refreshed every 10 seconds, per-client metrics off.
    fn default() -> Self {
        let prometheus_endpoint_address = SocketAddr::from(([0, 0, 0, 0], 9000));

        MetricsConfig {
            run_prometheus_endpoint: false,
            prometheus_endpoint_address,
            torrent_count_update_interval: 10,
            peer_clients: false,
            peer_id_prefixes: false,
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::Config;

    // Generated test — presumably serializes the default Config to TOML
    // and deserializes it back to check the derive round-trips; confirm
    // against the aquatic_toml_config macro definition.
    ::aquatic_toml_config::gen_serialize_deserialize_test!(Config);
}
|
||||
197
crates/ws/src/lib.rs
Normal file
197
crates/ws/src/lib.rs
Normal file
|
|
@ -0,0 +1,197 @@
|
|||
pub mod common;
|
||||
pub mod config;
|
||||
pub mod workers;
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Context;
|
||||
use aquatic_common::cpu_pinning::glommio::{get_worker_placement, set_affinity_for_util_worker};
|
||||
use aquatic_common::cpu_pinning::WorkerIndex;
|
||||
use aquatic_common::rustls_config::create_rustls_config;
|
||||
use aquatic_common::{PanicSentinelWatcher, ServerStartInstant};
|
||||
use glommio::{channels::channel_mesh::MeshBuilder, prelude::*};
|
||||
use signal_hook::{
|
||||
consts::{SIGTERM, SIGUSR1},
|
||||
iterator::Signals,
|
||||
};
|
||||
|
||||
use aquatic_common::access_list::update_access_list;
|
||||
use aquatic_common::privileges::PrivilegeDropper;
|
||||
|
||||
use common::*;
|
||||
use config::Config;
|
||||
|
||||
pub const APP_NAME: &str = "aquatic_ws: WebTorrent tracker";
|
||||
pub const APP_VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
|
||||
pub const SHARED_IN_CHANNEL_SIZE: usize = 1024;
|
||||
|
||||
pub fn run(config: Config) -> ::anyhow::Result<()> {
|
||||
if config.network.enable_tls && config.network.enable_http_health_checks {
|
||||
return Err(anyhow::anyhow!(
|
||||
"configuration: network.enable_tls and network.enable_http_health_check can't both be set to true"
|
||||
));
|
||||
}
|
||||
|
||||
let mut signals = Signals::new([SIGUSR1, SIGTERM])?;
|
||||
|
||||
#[cfg(feature = "prometheus")]
|
||||
if config.metrics.run_prometheus_endpoint {
|
||||
use metrics_exporter_prometheus::PrometheusBuilder;
|
||||
|
||||
let idle_timeout = config
|
||||
.cleaning
|
||||
.connection_cleaning_interval
|
||||
.max(config.cleaning.torrent_cleaning_interval)
|
||||
.max(config.metrics.torrent_count_update_interval)
|
||||
* 2;
|
||||
|
||||
PrometheusBuilder::new()
|
||||
.idle_timeout(
|
||||
metrics_util::MetricKindMask::GAUGE,
|
||||
Some(Duration::from_secs(idle_timeout)),
|
||||
)
|
||||
.with_http_listener(config.metrics.prometheus_endpoint_address)
|
||||
.install()
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Install prometheus endpoint on {}",
|
||||
config.metrics.prometheus_endpoint_address
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
let state = State::default();
|
||||
|
||||
update_access_list(&config.access_list, &state.access_list)?;
|
||||
|
||||
let num_peers = config.socket_workers + config.swarm_workers;
|
||||
|
||||
let request_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE);
|
||||
let response_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE * 16);
|
||||
let control_mesh_builder = MeshBuilder::partial(num_peers, SHARED_IN_CHANNEL_SIZE);
|
||||
|
||||
let (sentinel_watcher, sentinel) = PanicSentinelWatcher::create_with_sentinel();
|
||||
let priv_dropper = PrivilegeDropper::new(config.privileges.clone(), config.socket_workers);
|
||||
|
||||
let opt_tls_config = if config.network.enable_tls {
|
||||
Some(Arc::new(
|
||||
create_rustls_config(
|
||||
&config.network.tls_certificate_path,
|
||||
&config.network.tls_private_key_path,
|
||||
)
|
||||
.with_context(|| "create rustls config")?,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let server_start_instant = ServerStartInstant::new();
|
||||
|
||||
let mut executors = Vec::new();
|
||||
|
||||
for i in 0..(config.socket_workers) {
|
||||
let sentinel = sentinel.clone();
|
||||
let config = config.clone();
|
||||
let state = state.clone();
|
||||
let opt_tls_config = opt_tls_config.clone();
|
||||
let control_mesh_builder = control_mesh_builder.clone();
|
||||
let request_mesh_builder = request_mesh_builder.clone();
|
||||
let response_mesh_builder = response_mesh_builder.clone();
|
||||
let priv_dropper = priv_dropper.clone();
|
||||
|
||||
let placement = get_worker_placement(
|
||||
&config.cpu_pinning,
|
||||
config.socket_workers,
|
||||
config.swarm_workers,
|
||||
WorkerIndex::SocketWorker(i),
|
||||
)?;
|
||||
let builder = LocalExecutorBuilder::new(placement).name(&format!("socket-{:02}", i + 1));
|
||||
|
||||
let executor = builder
|
||||
.spawn(move || async move {
|
||||
workers::socket::run_socket_worker(
|
||||
sentinel,
|
||||
config,
|
||||
state,
|
||||
opt_tls_config,
|
||||
control_mesh_builder,
|
||||
request_mesh_builder,
|
||||
response_mesh_builder,
|
||||
priv_dropper,
|
||||
server_start_instant,
|
||||
i,
|
||||
)
|
||||
.await
|
||||
})
|
||||
.map_err(|err| anyhow::anyhow!("Spawning executor failed: {:#}", err))?;
|
||||
|
||||
executors.push(executor);
|
||||
}
|
||||
|
||||
::log::info!("spawned socket workers");
|
||||
|
||||
for i in 0..(config.swarm_workers) {
|
||||
let sentinel = sentinel.clone();
|
||||
let config = config.clone();
|
||||
let state = state.clone();
|
||||
let control_mesh_builder = control_mesh_builder.clone();
|
||||
let request_mesh_builder = request_mesh_builder.clone();
|
||||
let response_mesh_builder = response_mesh_builder.clone();
|
||||
|
||||
let placement = get_worker_placement(
|
||||
&config.cpu_pinning,
|
||||
config.socket_workers,
|
||||
config.swarm_workers,
|
||||
WorkerIndex::SwarmWorker(i),
|
||||
)?;
|
||||
let builder = LocalExecutorBuilder::new(placement).name(&format!("swarm-{:02}", i + 1));
|
||||
|
||||
let executor = builder
|
||||
.spawn(move || async move {
|
||||
workers::swarm::run_swarm_worker(
|
||||
sentinel,
|
||||
config,
|
||||
state,
|
||||
control_mesh_builder,
|
||||
request_mesh_builder,
|
||||
response_mesh_builder,
|
||||
server_start_instant,
|
||||
i,
|
||||
)
|
||||
.await
|
||||
})
|
||||
.map_err(|err| anyhow::anyhow!("Spawning executor failed: {:#}", err))?;
|
||||
|
||||
executors.push(executor);
|
||||
}
|
||||
|
||||
::log::info!("spawned swarm workers");
|
||||
|
||||
if config.cpu_pinning.active {
|
||||
set_affinity_for_util_worker(
|
||||
&config.cpu_pinning,
|
||||
config.socket_workers,
|
||||
config.swarm_workers,
|
||||
)?;
|
||||
}
|
||||
|
||||
for signal in &mut signals {
|
||||
match signal {
|
||||
SIGUSR1 => {
|
||||
let _ = update_access_list(&config.access_list, &state.access_list);
|
||||
}
|
||||
SIGTERM => {
|
||||
if sentinel_watcher.panic_was_triggered() {
|
||||
return Err(anyhow::anyhow!("worker thread panicked"));
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
14
crates/ws/src/main.rs
Normal file
14
crates/ws/src/main.rs
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
use aquatic_common::cli::run_app_with_cli_and_config;
|
||||
use aquatic_ws::config::Config;
|
||||
|
||||
#[global_allocator]
|
||||
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn main() {
|
||||
run_app_with_cli_and_config::<Config>(
|
||||
aquatic_ws::APP_NAME,
|
||||
aquatic_ws::APP_VERSION,
|
||||
aquatic_ws::run,
|
||||
None,
|
||||
)
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue